Example #1
def dump_output_results(results,
                        container,
                        test_file,
                        snapshot,
                        filesystem=True):
    """ Dump the report in the json format for test execution results."""
    od = OrderedDict()
    od["$schema"] = ""
    od["contentVersion"] = "1.0.0.0"
    od["fileType"] = OUTPUT
    od["timestamp"] = int(time.time() * 1000)
    od["snapshot"] = snapshot
    od["container"] = container
    dblog = get_dblogger()
    od["log"] = dblog if dblog else ""
    if filesystem:
        test_file_parts = test_file.rsplit('/', 1)
        od["test"] = test_file_parts[-1]
        output_file = '%s/output-%s' % (test_file_parts[0],
                                        test_file_parts[-1])
        od["results"] = results
        save_json_to_file(od, output_file)
    else:
        od["test"] = test_file
        od["results"] = results
        del od["$schema"]
        doc = json_record(container, OUTPUT, test_file, od)
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[OUTPUT])
        insert_one_document(doc, collection, dbname)
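A minimal usage sketch (all values hypothetical): with filesystem=True the report is written next to the test file as output-<name>.json; with filesystem=False the same record, minus "$schema", is inserted into the configured output collection instead.

results = [{'testId': '1', 'result': 'passed', 'snapshots': []}]
dump_output_results(results,
                    container='container1',
                    test_file='/tmp/validation/container1/test1.json',
                    snapshot='snapshot1',
                    filesystem=True)
# Writes /tmp/validation/container1/output-test1.json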
Example #2
def generate_crawler_run_output(container):
    """
    This creates a entry in the output collection, whenever a crawler runs
    to fetch data. 
    """
    timestamp = int(time.time() * 1000)
    sort = [sort_field('timestamp', False)]
    qry = {'container': container}
    output_collection = config_value(DATABASE, collectiontypes[OUTPUT])
    dbname = config_value(DATABASE, DBNAME)

    collection = config_value(DATABASE, collectiontypes[MASTERTEST])
    tests = get_documents(collection,
                          dbname=dbname,
                          sort=sort,
                          query=qry,
                          _id=True)
    master_tests = [{
        "id": str(test['_id']),
        "name": test['name']
    } for test in tests]

    mastersp_collection = config_value(DATABASE,
                                       collectiontypes[MASTERSNAPSHOT])
    snapshots = get_documents(mastersp_collection,
                              dbname=dbname,
                              sort=sort,
                              query=qry,
                              _id=True)
    master_snapshots = [{
        "id": str(snapshot['_id']),
        "name": snapshot['name']
    } for snapshot in snapshots]

    db_record = {
        "timestamp": timestamp,
        "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
        "collection": output_collection,
        "container": container,
        "name": "Crawlertest_%s" % (container),
        "type": "crawlerOutput",
        "json": {
            "container": container,
            "contentVersion": "",
            "fileType": "output",
            "snapshot": "",
            "test": "Crawlertest_%s" % (container),
            "log": get_dblog_handler().get_log_collection(),
            "timestamp": timestamp,
            "master_test_list": master_tests,
            "master_snapshot_list": master_snapshots,
            "output_type": "crawlerrun",
            "results": []
        }
    }
    insert_one_document(db_record, db_record['collection'], dbname, False)
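A usage sketch, assuming the database configuration and DB log handler are already initialized; the function needs only the container name and records a 'crawlerrun' output entry listing that container's master tests and master snapshots.

generate_crawler_run_output('container1')  # 'container1' is a hypothetical container name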
Example #3
def create_container_json_to_db(dbname):
    container_json = {'filterType': 'container', 'containers': []}
    container = {
        'collection': 'structures',
        'container': '',
        'name': '',
        'type': 'container',
        'timestamp': int(datetime.datetime.now().timestamp() * 1000),
        'json': container_json
    }
    insert_one_document(container, container['collection'], dbname, False)
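A usage sketch: this seeds the 'structures' collection with an empty container registry, as populate_json_files does further below when no container document exists yet.

create_container_json_to_db(config_value(DATABASE, DBNAME))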
Example #4
    def store_data_node(self, data):
        """ Store to database"""
        if get_collection_size(data['collection']) == 0:
            # Creating indexes for collection
            create_indexes(data['collection'], config_value(DATABASE, DBNAME),
                           [('snapshotId', pymongo.ASCENDING),
                            ('timestamp', pymongo.DESCENDING)])

            create_indexes(data['collection'], config_value(DATABASE, DBNAME),
                           [('_id', pymongo.DESCENDING),
                            ('timestamp', pymongo.DESCENDING),
                            ('snapshotId', pymongo.ASCENDING)])
        insert_one_document(data,
                            data['collection'],
                            self.dbname,
                            check_keys=False)
Example #5
def test_mongoconnection(monkeypatch):
    monkeypatch.setattr('processor.database.database.config_value', mock_config_value)
    monkeypatch.setattr('processor.database.database.get_dburl', mock_get_dburl)
    monkeypatch.setattr('processor.database.database.MongoClient', MyMongoClient)
    from processor.database.database import mongoconnection, mongodb, init_db,\
        get_collection, collection_names, insert_one_document, insert_documents,\
        check_document, get_documents, count_documents, index_information, distinct_documents
    # assert MONGO is None
    mongo = mongoconnection()
    assert mongo is not None
    dbname = 'abcd'
    testdb = mongodb(dbname)
    assert testdb is not None
    testdb = mongodb()
    assert testdb is not None
    val = testdb['abcd']
    assert val is not None
    init_db()
    coll = get_collection(dbname, 'a1')
    assert coll is not None
    colls = collection_names(dbname)
    assert colls is not None
    val = insert_one_document({'a': 'b'}, 'a1', dbname)
    assert val is not None
    val = distinct_documents('a1', 'a', dbname)
    assert val is not None
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        val = insert_one_document({'a': 'b'}, 'a1', dbname, False)
        assert val is not None
    val = '123456789012345678901234' # 24 character string
    doc = check_document('a1', val, dbname)
    assert doc is not None
    vals = insert_documents([{'a': 'b'}, {'c': 'd'}], 'a1', dbname)
    assert len(vals) == 2
    vals = get_documents('a1', dbname=dbname, sort=None)
    assert vals is not None
    vals = get_documents('a1', dbname=dbname, sort='abcd')
    assert vals is not None
    count = count_documents('a1', dbname=dbname)
    assert type(count) is int
    assert count > 0
    info = index_information('a1', dbname)
    assert info is not None
    mongo.drop_database(dbname)
Example #6
    def store_data_record(self):
        """
        creates the indexes on collection and stores the data record in database or creates
        the generated snapshot at file system
        """
        data_record = self.create_database_record()
        if get_dbtests():
            if get_collection_size(data_record['collection']) == 0:
                #Creating indexes for collection
                create_indexes(data_record['collection'],
                               config_value(DATABASE, DBNAME),
                               [('snapshotId', pymongo.ASCENDING),
                                ('timestamp', pymongo.DESCENDING)])

                create_indexes(data_record['collection'],
                               config_value(DATABASE, DBNAME),
                               [('_id', pymongo.DESCENDING),
                                ('timestamp', pymongo.DESCENDING),
                                ('snapshotId', pymongo.ASCENDING)])
            insert_one_document(data_record,
                                data_record['collection'],
                                self.dbname,
                                check_keys=False)
        else:
            snapshot_dir = make_snapshots_dir(self.container)
            if snapshot_dir:
                store_snapshot(snapshot_dir, data_record)

        if 'masterSnapshotId' in self.node:
            self.snapshot_data[
                self.node['snapshotId']] = self.node['masterSnapshotId']
        else:
            self.snapshot_data[self.node['snapshotId']] = False if (
                'error' in data_record and data_record['error']) else True

        self.node['status'] = 'active'
Example #7
def populate_custom_snapshot(snapshot, container=None):
    """ Populates the resources from git."""
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    connector_data = get_from_currentdata('connector')
    if connector_data:
        sub_data = get_custom_data(connector_data)
        if not sub_data:
            logger.error("No connector data found in '%s'", connector_data)
    else:
        sub_data = get_custom_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        baserepo, repopath = _get_repo_path(sub_data, snapshot)
        if repopath:
            brnch = get_field_value_with_default(sub_data, 'branchName', 'master')
            for node in snapshot_nodes:
                node_type = node['type'] if 'type' in node and node['type'] else ''
                if node_type in TEMPLATE_NODE_TYPES:
                    template_data = {
                        "container" : container,
                        "dbname" : dbname,
                        "snapshot_source" : snapshot_source,
                        "connector_data" : sub_data,
                        "snapshot_data" : snapshot_data,
                        "repopath" : repopath,
                        "snapshot" : snapshot
                    }
                    template_processor = TEMPLATE_NODE_TYPES[node_type](node, **template_data)
                    if 'snapshotId' in node:
                        snapshot_data = template_processor.populate_template_snapshot()
                    elif 'masterSnapshotId' in node:
                        snapshot_data = template_processor.populate_all_template_snapshot()
                elif 'paths' in node:
                    logger.error("ERROR: Invalid json : `%s` is not a valid node type." % (node_type))
                else:
                    # logger.debug(node)
                    # data = get_node(repopath, node, snapshot_source, brnch)
                    # if data:
                    #     insert_one_document(data, data['collection'], dbname)
                    #     snapshot_data[node['snapshotId']] = True
                    validate = node['validate'] if 'validate' in node else True
                    if 'snapshotId' in node:
                        logger.debug(node)
                        data = get_node(repopath, node, snapshot, brnch, sub_data)
                        if data:
                            if validate:
                                if get_dbtests():
                                    if get_collection_size(data['collection']) == 0:
                                        # Creating indexes for collection
                                        create_indexes(
                                            data['collection'],
                                            config_value(DATABASE, DBNAME),
                                            [('snapshotId', pymongo.ASCENDING),
                                             ('timestamp', pymongo.DESCENDING)])

                                        create_indexes(
                                            data['collection'],
                                            config_value(DATABASE, DBNAME),
                                            [('_id', pymongo.DESCENDING),
                                             ('timestamp', pymongo.DESCENDING),
                                             ('snapshotId', pymongo.ASCENDING)])
                                    insert_one_document(data, data['collection'], dbname)
                                else:
                                    snapshot_dir = make_snapshots_dir(container)
                                    if snapshot_dir:
                                        store_snapshot(snapshot_dir, data)
                                if 'masterSnapshotId' in node:
                                    snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                                else:
                                    snapshot_data[node['snapshotId']] = True
                            # else:
                            #     snapshot_data[node['snapshotId']] = False
                            node['status'] = 'active'
                        else:
                            node['status'] = 'inactive'
                        logger.debug('Type: %s', type(data))
                    elif 'masterSnapshotId' in node:
                        alldata = get_all_nodes(repopath, node, snapshot, brnch, sub_data)
                        if alldata:
                            snapshot_data[node['masterSnapshotId']] = []
                            for data in alldata:
                                snapshot_data[node['masterSnapshotId']].append(
                                    {
                                        'snapshotId': data['snapshotId'],
                                        'path': data['path'],
                                        'validate': validate
                                    })
                        logger.debug('Type: %s', type(alldata))
        if baserepo and os.path.exists(baserepo):
            # logger.info('\t\tCLEANING Repo: %s', baserepo)
            shutil.rmtree(baserepo)
    return snapshot_data
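A sketch of the snapshot document this function expects, with hypothetical values; the field names mirror the lookups made above ('source', 'nodes', 'snapshotId', 'type', 'validate').

snapshot = {
    'source': 'gitConnector1',   # hypothetical git connector name
    'nodes': [{
        'snapshotId': 'GIT_SNAPSHOT_1',
        'type': 'json',          # non-template types fall through to get_node
        'collection': 'resources',
        'path': 'realm/azure/deployment.json',
        'validate': True
    }]
}
status = populate_custom_snapshot(snapshot, container='container1')
# status maps each snapshotId to True/False, or to its masterSnapshotId.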
Example #8
def populate_google_snapshot(snapshot, container=None):
    """
    This is an entrypoint for populating a snapshot of type google.
    All snapshot connectors should take snapshot object and based on
    'source' field create a method to connect to the service for the
    connector.
    The 'source' field could be used by more than one snapshot, so the
    'testuser' attribute should match to the user the 'source'
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    project_id = get_field_value(snapshot, 'project-id')
    sub_data = get_google_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        logger.debug(sub_data)
        try:
            for node in snapshot['nodes']:
                validate = node['validate'] if 'validate' in node else True
                logger.info(node)
                node_type = get_field_value_with_default(node, 'type',"")
                credentials = get_google_client_data(sub_data, snapshot_user, node_type, project_id)
                if not credentials:
                    logger.info("No  GCE connection in the snapshot to access Google resource!...")
                    return snapshot_data
                if 'snapshotId' in node:
                    if validate:
                        data = get_node(credentials, node, snapshot_source, snapshot)
                        if data:
                            error_str = data.pop('error', None)
                            if get_dbtests():
                                if get_collection_size(data['collection']) == 0:
                                    #Creating indexes for collection
                                    create_indexes(
                                        data['collection'], 
                                        config_value(DATABASE, DBNAME), 
                                        [
                                            ('snapshotId', pymongo.ASCENDING),
                                            ('timestamp', pymongo.DESCENDING)
                                        ]
                                    )
                                    create_indexes(
                                        data['collection'], 
                                        config_value(DATABASE, DBNAME), 
                                        [
                                            ('_id', pymongo.DESCENDING),
                                            ('timestamp', pymongo.DESCENDING),
                                            ('snapshotId', pymongo.ASCENDING)
                                        ]
                                    )
                                insert_one_document(data, data['collection'], dbname)
                            else:
                                snapshot_dir = make_snapshots_dir(container)
                                if snapshot_dir:
                                    store_snapshot(snapshot_dir, data)
                    
                            if 'masterSnapshotId' in node:
                                snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                            else:
                                snapshot_data[node['snapshotId']] = False if error_str else True
                        else:
                            node['status'] = 'inactive'
                elif 'masterSnapshotId' in node:
                    data = get_all_nodes(credentials, node, snapshot_source, snapshot, snapshot_data)
                    logger.debug('Type: %s', type(data))
        except Exception as ex:
            logger.info('Unable to create Google client: %s', ex)
            raise ex
    return snapshot_data
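A usage sketch with hypothetical values; 'testUser' and 'project-id' are the fields read above to select the Google credentials.

snapshot = {
    'source': 'googleConnector1',
    'testUser': 'gce-user1',
    'project-id': 'my-gcp-project',
    'nodes': [{
        'snapshotId': 'GOOGLE_SNAPSHOT_1',
        'type': 'compute',
        'collection': 'google_resources',
        'validate': True
    }]
}
status = populate_google_snapshot(snapshot, container='container1')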
Example #9
def populate_azure_snapshot(snapshot, container=None, snapshot_type='azure'):
    """ Populates the resources from azure."""
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    client_id, client_secret, sub_name, sub_id, tenant_id = \
        get_web_client_data(snapshot_type, snapshot_source, snapshot_user)
    if not client_id:
        # logger.info("No client_id in the snapshot to access azure resource!...")
        raise Exception("No client id in the snapshot to access azure resource!...")

    # Read the client secret from the environment variable
    if not client_secret:
        client_secret = os.getenv(snapshot_user, None)
        if client_secret:
            logger.info('Client Secret from environment variable, Secret: %s', '*' * len(client_secret))
        
    # Read the client secrets from the vault
    if not client_secret:
        client_secret = get_vault_data(client_id)
        if client_secret:
            logger.info('Client Secret from Vault, Secret: %s', '*' * len(client_secret))
        elif get_from_currentdata(CUSTOMER):
            logger.error("Client Secret key does not set in a vault")
            raise Exception("Client Secret key does not set in a vault")

    if not client_secret:
        raise Exception("No `client_secret` key in the connector file to access azure resource!...")

    logger.info('\t\tSubscription: %s', sub_id)
    logger.info('\t\tTenant: %s', tenant_id)
    logger.info('\t\tclient: %s', client_id)
    put_in_currentdata('clientId', client_id)
    put_in_currentdata('clientSecret', client_secret)
    put_in_currentdata('subscriptionId', sub_id)
    put_in_currentdata('tenant_id', tenant_id)
    token = get_access_token()
    logger.debug('TOKEN: %s', token)
    if not token:
        logger.info("Unable to get access token, will not run tests....")
        raise Exception("Unable to get access token, will not run tests....")
        # return {}

    # snapshot_nodes = get_field_value(snapshot, 'nodes')
    # snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and token and snapshot_nodes:
        for node in snapshot_nodes:
            validate = node['validate'] if 'validate' in node else True
            if 'path' in node:
                data = get_node(token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if data:
                    if validate:
                        if get_dbtests():
                            if get_collection_size(data['collection']) == 0:
                                # Creating indexes for collection
                                create_indexes(
                                    data['collection'], 
                                    config_value(DATABASE, DBNAME), 
                                    [
                                        ('snapshotId', pymongo.ASCENDING),
                                        ('timestamp', pymongo.DESCENDING)
                                    ]
                                )

                                create_indexes(
                                    data['collection'], 
                                    config_value(DATABASE, DBNAME), 
                                    [
                                        ('_id', pymongo.DESCENDING),
                                        ('timestamp', pymongo.DESCENDING),
                                        ('snapshotId', pymongo.ASCENDING)
                                    ]
                                )
                            insert_one_document(data, data['collection'], dbname, check_keys=False)
                        else:
                            snapshot_dir = make_snapshots_dir(container)
                            if snapshot_dir:
                                store_snapshot(snapshot_dir, data)
                        if 'masterSnapshotId' in node:
                            snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                        else:
                            snapshot_data[node['snapshotId']] = True
                    # else:
                    #     snapshot_data[node['snapshotId']] = False
                    node['status'] = 'active'
                else:
                    # TODO alert if notification enabled or summary for inactive.
                    node['status'] = 'inactive'
                logger.debug('Type: %s', type(data))
            else:
                alldata = get_all_nodes(
                    token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if alldata:
                    snapshot_data[node['masterSnapshotId']] = []
                    for data in alldata:
                        # insert_one_document(data, data['collection'], dbname)
                        found_old_record = False
                        for masterSnapshotId, snapshot_list in snapshot_data.items():
                            old_record = None
                            if isinstance(snapshot_list, list):
                                for item in snapshot_list:
                                    if item["path"] == data['path']:
                                        old_record = item

                                if old_record:
                                    found_old_record = True
                                    if node['masterSnapshotId'] not in old_record['masterSnapshotId']:
                                        old_record['masterSnapshotId'].append(
                                            node['masterSnapshotId'])

                        if not found_old_record:
                            snapshot_data[node['masterSnapshotId']].append(
                                {
                                    'masterSnapshotId': [node['masterSnapshotId']],
                                    'snapshotId': data['snapshotId'],
                                    'path': data['path'],
                                    'validate': validate,
                                    'status': 'active'
                                })
                    # snapshot_data[node['masterSnapshotId']] = True
                logger.debug('Type: %s', type(alldata))
        delete_from_currentdata('resources')
        delete_from_currentdata('clientId')
        delete_from_currentdata('client_secret')
        delete_from_currentdata('subscriptionId')
        delete_from_currentdata('tenant_id')
        delete_from_currentdata('token')
    return snapshot_data
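A usage sketch with hypothetical values. When the connector omits the client secret, the function falls back to an environment variable named after 'testUser', then to the vault, before raising.

snapshot = {
    'source': 'azureConnector1',
    'testUser': 'azure-user1',
    'nodes': [{
        'snapshotId': 'AZURE_SNAPSHOT_1',
        'type': 'virtualMachines',
        'collection': 'azure_resources',
        'path': '/subscriptions/<sub-id>/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1',
        'validate': True
    }]
}
status = populate_azure_snapshot(snapshot, container='container1', snapshot_type='azure')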
Example #10
def populate_aws_snapshot(snapshot, container=None):
    """
    This is an entrypoint for populating a snapshot of type aws.
    All snapshot connectors should take the snapshot object and, based on
    the 'source' field, create a method to connect to the service for the
    connector.
    The 'source' field can be used by more than one snapshot, so the
    'testUser' attribute should match a user of the 'source'.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    account_id = get_field_value(snapshot, 'accountId')
    sub_data = get_aws_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    # valid_snapshotids = True
    # if snapshot_nodes:
    #     for node in snapshot_nodes:
    #         snapshot_data[node['snapshotId']] = False
    #         if not isinstance(node['snapshotId'], str):
    #             valid_snapshotids = False
    # if not valid_snapshotids:
    #     logger.error('All snap')
    if valid_snapshotids and sub_data and snapshot_nodes:
        logger.debug(sub_data)
        access_key, secret_access, region, connector_client_str = \
            get_aws_client_data(sub_data, snapshot_user, account_id)
        if not access_key:
            logger.info(
                "No access_key in the snapshot to access aws resource!...")
            raise Exception(
                "No access_key in the snapshot to access aws resource!...")
            # return snapshot_data

        # Read the client secret from the environment variable or standard input
        # if not secret_access and ('UAMI' not in os.environ or os.environ['UAMI'] != 'true'):
        #     secret_access = get_client_secret()
        #     logger.info('Environment variable or Standard input, Secret: %s', '*' * len(secret_access))

        # Read the client secrets from the vault
        if not secret_access:
            secret_access = get_vault_data(access_key)
            if secret_access:
                logger.info('Vault Secret: %s', '*' * len(secret_access))
            else:
                logger.info("Secret Access key does not set in a vault")
                raise Exception("Secret Access key does not set in a vault")
        if not secret_access:
            logger.info(
                "No secret_access in the snapshot to access aws resource!...")
            return snapshot_data
        if access_key and secret_access:
            # existing_aws_client = {}
            for node in snapshot['nodes']:
                mastercode = False
                if 'snapshotId' in node:
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node,
                        default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    try:
                        awsclient = client(client_str.lower(),
                                           aws_access_key_id=access_key,
                                           aws_secret_access_key=secret_access,
                                           region_name=aws_region)
                    except Exception as ex:
                        logger.info('Unable to create AWS client: %s', ex)
                        awsclient = None
                    logger.info(awsclient)
                    if awsclient:
                        data = get_node(awsclient, node, snapshot_source)
                        if data:
                            error_str = data.pop('error', None)
                            if get_dbtests():
                                if get_collection_size(
                                        data['collection']) == 0:
                                    #Creating indexes for collection
                                    create_indexes(
                                        data['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [('snapshotId', pymongo.ASCENDING),
                                         ('timestamp', pymongo.DESCENDING)])
                                check_key = is_check_keys_required(data)
                                insert_one_document(data, data['collection'],
                                                    dbname, check_key)
                            else:
                                snapshot_dir = make_snapshots_dir(container)
                                if snapshot_dir:
                                    store_snapshot(snapshot_dir, data)
                            if 'masterSnapshotId' in node:
                                snapshot_data[node['snapshotId']] = node[
                                    'masterSnapshotId']
                            else:
                                snapshot_data[node[
                                    'snapshotId']] = False if error_str else True
                elif 'masterSnapshotId' in node:
                    mastercode = True
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node,
                        default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    if aws_region:
                        all_regions = [aws_region]
                    else:
                        all_regions = Session().get_available_regions(
                            client_str.lower())
                        if client_str.lower() in ['s3', 'cloudtrail']:
                            all_regions = ['us-west-1']
                    logger.info("Length of all regions is %s" %
                                (str(len(all_regions))))
                    count = 0
                    snapshot_data[node['masterSnapshotId']] = []
                    for each_region in all_regions:
                        logger.info(each_region)
                        try:
                            awsclient = client(
                                client_str.lower(),
                                aws_access_key_id=access_key,
                                aws_secret_access_key=secret_access,
                                region_name=each_region)
                        except Exception as ex:
                            logger.info('Unable to create AWS client: %s', ex)
                        logger.info(awsclient)
                        if awsclient:
                            all_data = get_all_nodes(awsclient, node, snapshot,
                                                     sub_data)
                            if all_data:
                                for data in all_data:
                                    snapshot_data[node['masterSnapshotId']].append({
                                        'snapshotId': '%s%s' % (node['masterSnapshotId'], str(count)),
                                        'validate': True,
                                        'detailMethods': data['detailMethods'],
                                        'structure': 'aws',
                                        'masterSnapshotId': node['masterSnapshotId'],
                                        'collection': data['collection'],
                                        'arn': data['arn']
                                    })
                                    count += 1
            if mastercode:
                snapshot_data = eliminate_duplicate_snapshots(snapshot_data)
    return snapshot_data
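A usage sketch (hypothetical values): a node carrying 'snapshotId' fetches one resource, while a node carrying 'masterSnapshotId' crawls every available region for the client.

snapshot = {
    'source': 'awsConnector1',
    'testUser': 'aws-user1',
    'accountId': '123456789012',
    'nodes': [
        {'snapshotId': 'AWS_SNAPSHOT_1', 'type': 'ec2', 'collection': 'aws_resources'},
        {'masterSnapshotId': 'AWS_MASTER_1', 'type': 'ec2', 'collection': 'aws_resources'}
    ]
}
status = populate_aws_snapshot(snapshot, container='container1')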
Example #11
def populate_sub_directory_snapshot(base_dir_path, sub_dir_path, snapshot,
                                    dbname, node, snapshot_data):
    dir_path = str('%s/%s' % (base_dir_path, sub_dir_path)).replace('//', '/')
    if exists_dir(dir_path):
        list_of_file = os.listdir(dir_path)
        template_file_path = ""
        deployment_file_path_list = []

        for entry in list_of_file:
            new_dir_path = ('%s/%s' % (dir_path, entry)).replace('//', '/')
            new_sub_directory_path = ('%s/%s' % (sub_dir_path, entry)).replace(
                '//', '/')
            if exists_dir(new_dir_path):
                populate_sub_directory_snapshot(base_dir_path,
                                                new_sub_directory_path,
                                                snapshot, dbname, node,
                                                snapshot_data)
            elif exists_file(new_dir_path):
                if len(entry.split(".")) > 0 and "json" in entry.split(
                        ".")[-1]:
                    json_data = json_from_file(new_dir_path)
                    if json_data and "$schema" in json_data:
                        if "deploymentTemplate.json" in json_data[
                                '$schema'].split("/")[-1]:
                            template_file_path = new_sub_directory_path
                        elif "deploymentParameters.json" in json_data[
                                '$schema'].split("/")[-1]:
                            deployment_file_path_list.append(
                                new_sub_directory_path)

        if template_file_path and deployment_file_path_list:

            location = get_field_value(node, 'location')
            new_deployment_file_path_list = []

            template_file_json_path = str(
                '%s/%s' % (base_dir_path, template_file_path)).replace(
                    '//', '/')
            for deployment_file_path in deployment_file_path_list:
                deployment_file_json_path = str(
                    '%s/%s' % (base_dir_path, deployment_file_path)).replace(
                        '//', '/')

                response = invoke_az_cli("deployment validate --location " +
                                         location + " --template-file " +
                                         template_file_json_path +
                                         " --parameters @" +
                                         deployment_file_json_path)

                if not response['error']:
                    new_deployment_file_path_list.append({
                        "path": deployment_file_path,
                        "status": "active"
                    })
                else:
                    new_deployment_file_path_list.append({
                        "path": deployment_file_path,
                        "status": "inactive"
                    })

            data_record = create_snapshot_record(
                snapshot, new_sub_directory_path, node, template_file_path,
                new_deployment_file_path_list)
            if node['masterSnapshotId'] not in snapshot_data or not isinstance(
                    snapshot_data[node['masterSnapshotId']], list):
                snapshot_data[node['masterSnapshotId']] = []

            snapshot_data[node['masterSnapshotId']] = snapshot_data[node[
                'masterSnapshotId']] + data_record['snapshots'][0]['nodes']
            if get_dbtests():
                insert_one_document(data_record, node['collection'], dbname)
            else:
                snapshot_file = '%s/%s' % (dir_path, "snapshot.json")
                save_json_to_file(data_record, snapshot_file)
Example #12
def populate_arm_snapshot(container, dbname, snapshot_source, sub_data,
                          snapshot_data, node, repopath):
    """
    Populate snapshot by running arm command
    """
    dir_path = get_field_value(sub_data, 'folderPath')
    if not dir_path:
        dir_path = repopath

    location = get_field_value(node, 'location')
    paths = get_field_value(node, 'paths')

    template_file_path = ""
    deployment_file_path = ""

    if paths and isinstance(paths, list):
        if not location:
            logger.error("Invalid json : 'location' field is required in node")
            node['status'] = 'inactive'
            return snapshot_data
        for json_file in paths:
            json_file_path = '%s/%s.json' % (dir_path, json_file)
            json_data = json_from_file(json_file_path)
            if not json_data:
                logger.error("Invalid path or json")
                node['status'] = 'inactive'
                return snapshot_data

            elif "$schema" not in json_data:
                logger.error(
                    "Invalid json : does not contains '$schema' field in json."
                )
                node['status'] = 'inactive'
                return snapshot_data
            else:
                if "deploymentTemplate.json" in json_data['$schema'].split(
                        "/")[-1]:
                    template_file_path = json_file_path
                elif "deploymentParameters.json" in json_data['$schema'].split(
                        "/")[-1]:
                    deployment_file_path = json_file_path
                else:
                    logger.error(
                        "Invalid json: '$schema' does not contain the correct value"
                    )

        if template_file_path and deployment_file_path:
            response = invoke_az_cli("deployment validate --location " +
                                     location + " --template-file " +
                                     template_file_path + " --parameters @" +
                                     deployment_file_path)

            data_record = create_database_record(node, snapshot_source,
                                                 response, sub_data)

            if get_dbtests():
                if get_collection_size(node['collection']) == 0:
                    #Creating indexes for collection
                    create_indexes(node['collection'],
                                   config_value(DATABASE, DBNAME),
                                   [('snapshotId', pymongo.ASCENDING),
                                    ('timestamp', pymongo.DESCENDING)])
                insert_one_document(data_record, node['collection'], dbname)
            else:
                snapshot_dir = make_snapshots_dir(container)
                if snapshot_dir:
                    store_snapshot(snapshot_dir, data_record)
            snapshot_data[
                node['snapshotId']] = False if data_record['error'] else True
            node['status'] = 'active'
        else:
            node['status'] = 'inactive'
    else:
        node['status'] = 'inactive'
        logger.error(
            "Invalid json : `paths` field is missing for 'arm' node type or it is not a list"
        )

    return snapshot_data
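A sketch of the 'arm' node shape this function validates (hypothetical values): 'location' is mandatory, and 'paths' lists the template and parameter files without the .json suffix, since the function appends it. populate_custom_snapshot below wires the remaining arguments (container, dbname, sub_data, repopath).

node = {
    'snapshotId': 'ARM_SNAPSHOT_1',
    'type': 'arm',
    'collection': 'armtemplates',
    'location': 'eastus',
    'paths': ['templates/deploymentTemplate', 'templates/deploymentParameters']
}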
Example #13
def populate_custom_snapshot(snapshot, container=None):
    """ Populates the resources from git."""
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    sub_data = get_custom_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        baserepo, repopath = _get_repo_path(sub_data, snapshot)
        if repopath:
            brnch = get_field_value_with_default(sub_data, 'branchName',
                                                 'master')
            for node in snapshot_nodes:
                node_type = node[
                    'type'] if 'type' in node and node['type'] else 'json'
                if node_type == 'arm':
                    if 'snapshotId' in node:
                        populate_arm_snapshot(container, dbname,
                                              snapshot_source, sub_data,
                                              snapshot_data, node, repopath)
                    elif 'masterSnapshotId' in node:
                        populate_all_arm_snapshot(snapshot, dbname, sub_data,
                                                  node, repopath,
                                                  snapshot_data)
                else:
                    # logger.debug(node)
                    # data = get_node(repopath, node, snapshot_source, brnch)
                    # if data:
                    #     insert_one_document(data, data['collection'], dbname)
                    #     snapshot_data[node['snapshotId']] = True
                    validate = node['validate'] if 'validate' in node else True
                    if 'snapshotId' in node:
                        logger.debug(node)
                        data = get_node(repopath, node, snapshot, brnch,
                                        sub_data)
                        if data:
                            if validate:
                                if get_dbtests():
                                    if get_collection_size(
                                            data['collection']) == 0:
                                        #Creating indexes for collection
                                        create_indexes(
                                            data['collection'],
                                            config_value(DATABASE, DBNAME),
                                            [('snapshotId', pymongo.ASCENDING),
                                             ('timestamp', pymongo.DESCENDING)
                                             ])
                                    insert_one_document(
                                        data, data['collection'], dbname)
                                else:
                                    snapshot_dir = make_snapshots_dir(
                                        container)
                                    if snapshot_dir:
                                        store_snapshot(snapshot_dir, data)
                                if 'masterSnapshotId' in node:
                                    snapshot_data[node['snapshotId']] = node[
                                        'masterSnapshotId']
                                else:
                                    snapshot_data[node['snapshotId']] = True
                            else:
                                snapshot_data[node['snapshotId']] = False
                            node['status'] = 'active'
                        else:
                            node['status'] = 'inactive'
                        logger.debug('Type: %s', type(data))
                    elif 'masterSnapshotId' in node:
                        alldata = get_all_nodes(repopath, node, snapshot,
                                                brnch, sub_data)
                        if alldata:
                            snapshot_data[node['masterSnapshotId']] = []
                            for data in alldata:
                                snapshot_data[node['masterSnapshotId']].append(
                                    {
                                        'snapshotId': data['snapshotId'],
                                        'path': data['path'],
                                        'validate': True
                                    })
                        logger.debug('Type: %s', type(alldata))
        if baserepo and os.path.exists(baserepo):
            logger.info('Repo path: %s', baserepo)
            shutil.rmtree(baserepo)
    return snapshot_data
Example #14
def populate_json_files(args):
    dbname = config_value(DATABASE, DBNAME)
    containerId = None
    if args.container:
        container_struture_list = get_documents('structures',
                                                {'type': 'container'}, dbname)
        if not container_struture_list:
            # create container_json
            create_container_json_to_db(dbname)
            container_struture_list = get_documents('structures',
                                                    {'type': 'container'},
                                                    dbname)
        container_json = container_struture_list[0]['json']
        container_list = container_json['containers']

        filtered_list = list(
            filter(lambda i: i['name'] == args.container, container_list))
        if not filtered_list:
            # add a new container if it does not exist
            add_new_container(args.container, dbname)
            container_struture_list = get_documents('structures',
                                                    {'type': 'container'},
                                                    dbname)
            container_json = container_struture_list[0]['json']
            container_list = container_json['containers']
            filtered_list = list(
                filter(lambda i: i['name'] == args.container, container_list))
        containerId = filtered_list[0]['containerId']
    # return containerId

    # if args.dir:
    #     logger.info("Checking this directory: %s for json files", args.dir)
    #     json_dir = args.dir
    #     if exists_dir(args.dir):
    #         for filename in glob.glob('%s/*.json' % json_dir.replace('//', '/')):
    #             json_data = json_from_file(filename)
    #             if json_data and 'fileType' in json_data:
    #                 filetype = json_data['fileType']
    #             else:
    #                 filetype = 'structure'
    #             logger.info('Storing file:%s from directory: %s', json_dir, filename)
    #             db_record = json_record(args.container, filetype, filename, json_data)
    #             if validate_json_data(db_record['json'], db_record['type']):
    #                 insert_one_document(db_record, db_record['collection'], dbname, False)
    #                 logger.debug('DB Record: %s', json.dumps(db_record, indent=2))
    #             else:
    #                 logger.info('Invalid json for type:%s', db_record['type'])
    #             logger.info('*' * 80)
    if args.file:
        logger.info("Populating %s json file.", args.file)
        json_file = args.file
        if exists_file(json_file):
            if json_file.endswith('.json'):
                json_data = json_from_file(json_file)
                if json_data and 'fileType' in json_data:
                    filetype = json_data['fileType']
                # elif args.type:
                #     filetype = args.type
                else:
                    filetype = 'structure'
                logger.info('Storing file:%s', json_file)
                db_record = json_record(args.container, filetype, json_file,
                                        json_data)
                if validate_json_data(db_record['json'], db_record['type']):
                    docId = insert_one_document(db_record,
                                                db_record['collection'],
                                                dbname, False)
                    data = {
                        'object_id': ObjectId(docId),
                        'name': db_record['name']
                    }
                    if filetype == 'masterSnapshot':
                        save_container_object(args.container,
                                              'masterSnapshots', data, dbname)
                    elif filetype == 'mastertest':
                        save_container_object(args.container, 'masterTests',
                                              data, dbname)
                    elif filetype == 'snapshot':
                        save_container_object(args.container, 'Snapshots',
                                              data, dbname)
                    elif filetype == 'test':
                        save_container_object(args.container, 'Tests', data,
                                              dbname)
                    logger.debug('DB Record: %s',
                                 json.dumps(db_record, indent=2))
                else:
                    logger.info('Invalid json for type:%s', db_record['type'])
                logger.info('*' * 80)
            elif json_file.endswith('.rego'):
                with open(json_file) as f:
                    file_content = f.read()
                    content_type = 'application/octet-stream'
                    save_container_to_db(args.container, containerId,
                                         json_file, content_type, file_content,
                                         dbname)
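A usage sketch; the function reads only args.container and args.file, so any object exposing those attributes works (values hypothetical).

from types import SimpleNamespace

args = SimpleNamespace(container='container1', file='/tmp/master-snapshot.json')
populate_json_files(args)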
Example #15
def generate_container_mastersnapshots_database(container):
    """
    Get the mastersnapshot files from the container with storage system as database.
    The table or collection and database is configured in the config.ini, for the default
    location configuration is "validator" database with "mastersnapshots" as its collections.
    """
    snapshots_status = {}
    dbname = config_value(DATABASE, DBNAME)
    collection = config_value(DATABASE, collectiontypes[MASTERSNAPSHOT])
    snp_collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
    qry = {'container': container}
    sort = [sort_field('timestamp', False)]
    docs = get_documents(collection, dbname=dbname, sort=sort, query=qry)
    try:
        if docs and len(docs):
            logger.info('Number of mastersnapshot Documents: %s', len(docs))
            snapshots = mastersnapshots_used_in_mastertests_database(container)
            populated = []
            for doc in docs:
                if doc['json']:
                    snapshot = doc['name']
                    if "connector" in doc['json'] and "remoteFile" in doc[
                            'json'] and doc['json']["connector"] and doc[
                                'json']["remoteFile"]:
                        _, pull_response = pull_json_data(doc['json'])
                        if not pull_response:
                            logger.info(
                                "Failed to populate master snapshot json from the git repository"
                            )
                            break

                    if snapshot in snapshots:
                        if snapshot not in populated:
                            snp_collection = config_value(
                                DATABASE, collectiontypes[SNAPSHOT])
                            snp_name = '%s_gen' % snapshot
                            snp_qry = {
                                'container': container,
                                'name': snp_name
                            }
                            snp_sort = [sort_field('timestamp', False)]
                            snp_docs = get_documents(snp_collection,
                                                     dbname=dbname,
                                                     sort=snp_sort,
                                                     query=snp_qry,
                                                     _id=True)
                            snp_json_data = {}
                            if snp_docs and len(snp_docs):
                                logger.info('Number of snapshot Documents: %s',
                                            len(snp_docs))
                                snp_json_data = snp_docs[0]
                            # Take the mastersnapshot and populate the mastersnapshot
                            snapshot_file_data = generate_mastersnapshots_from_json(
                                doc['json'], snp_json_data)
                            # Insert or update the new generated snapshot document with name='*_gen' and same container name.
                            generate_snapshot(doc['json'], snapshot_file_data)
                            if snp_json_data:
                                set_snapshot_activate_and_validate_data(
                                    doc['json'], snp_json_data['json'])
                                snp_json_data['json'] = doc['json']
                                snp_json_data["timestamp"] = int(time.time() *
                                                                 1000)
                                update_one_document(
                                    snp_json_data, snp_json_data['collection'],
                                    dbname)
                            else:
                                db_record = {
                                    "timestamp": int(time.time() * 1000),
                                    "container": container,
                                    "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
                                    "type": "snapshot",
                                    "name": snp_name,
                                    "collection": "snapshots",
                                    "json": doc['json']
                                }
                                insert_one_document(db_record,
                                                    db_record['collection'],
                                                    dbname, False)
                            populated.append(snapshot)
                            snapshots_status[snapshot] = snapshot_file_data
                    else:
                        logger.error("No master testcase found for %s " %
                                     snapshot)
    except Exception as e:
        generate_crawler_run_output(container)
        raise e
    generate_crawler_run_output(container)
    return snapshots_status
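A usage sketch: the call returns a dict keyed by mastersnapshot name, and a crawler-run output entry is recorded whether or not generation succeeds.

snapshots_status = generate_container_mastersnapshots_database('container1')  # hypothetical container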
Example #16
def populate_kubernetes_snapshot(snapshot, container=None):
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    dbname = config_value('MONGODB', 'dbname')
    if valid_snapshotids and snapshot_nodes:
        logger.debug(valid_snapshotids)
        try:
            for node in snapshot_nodes:
                validate = node['validate'] if 'validate' in node else True
                logger.info(node)
                if 'snapshotId' in node:
                    if validate:
                        kubernetes_snapshot_data = get_kubernetes_snapshot_data(snapshot, node)
                        if kubernetes_snapshot_data:
                            error_str = kubernetes_snapshot_data.pop('error', None)
                            kubernetes_snapshot_template = make_kubernetes_snapshot_template(
                                snapshot,
                                node,
                                kubernetes_snapshot_data
                            )
                            if get_dbtests():
                                if get_collection_size(kubernetes_snapshot_template['collection']) == 0:
                                    #Creating indexes for collection
                                    create_indexes(
                                        kubernetes_snapshot_template['collection'], 
                                        config_value(DATABASE, DBNAME), 
                                        [
                                            ('snapshotId', pymongo.ASCENDING),
                                            ('timestamp', pymongo.DESCENDING)
                                        ]
                                    )
                                    create_indexes(
                                        kubernetes_snapshot_template['collection'], 
                                        config_value(DATABASE, DBNAME), 
                                        [
                                            ('_id', pymongo.DESCENDING),
                                            ('timestamp', pymongo.DESCENDING),
                                            ('snapshotId', pymongo.ASCENDING)
                                        ]
                                    )
                                insert_one_document(kubernetes_snapshot_template,
                                                    kubernetes_snapshot_template['collection'],
                                                    dbname, check_keys=False)

                            snapshot_dir = make_snapshots_dir(container)
                            if snapshot_dir:
                                store_snapshot(snapshot_dir, kubernetes_snapshot_template)
                            if "masterSnapshotId" in node:
                                snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                            elif "snapshotId" in node:
                                snapshot_data[node['snapshotId']] = False if error_str else True
                        else:
                            node['status'] = 'inactive'
                elif 'masterSnapshotId' in node:
                    snapshot_data = generate_crawler_snapshot(snapshot, node, snapshot_data)

        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logger.error('cannot connect to kubernetes cluster: %s', ex)
            logger.error('\t ERROR INFO : \n \tfile name : %s\n \tline : %s\n \ttype : %s\n \tobject : %s',
                         fname, exc_tb.tb_lineno, exc_type, exc_obj)
            print(traceback.format_exc())
            raise ex
    return snapshot_data
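A usage sketch with hypothetical values; any connection fields required by get_kubernetes_snapshot_data are omitted here for brevity.

snapshot = {
    'nodes': [{
        'snapshotId': 'K8S_SNAPSHOT_1',
        'type': 'pod',
        'collection': 'kubernetes_resources',
        'validate': True
    }]
}
status = populate_kubernetes_snapshot(snapshot, container='container1')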
Example #17
def save_container_to_db(container_name, container_id, file_name, content_type,
                         file_content, dbname):
    file_content_list = []
    structure_model_obj = get_documents('structures',
                                        {'json.containerId': container_id,
                                         'type': 'others'},
                                        dbname,
                                        _id=True)

    file_obj = {
        'name': file_name,
        'container_file': file_content,
        'content_type': content_type
    }

    if structure_model_obj:
        exist = False
        for file_data in structure_model_obj[0]['json']['file']:
            for key, value in file_data.items():
                if value == file_name:
                    exist = True

        if exist:
            for file_data in structure_model_obj[0]['json']['file']:
                for key, value in file_data.items():
                    if value == file_name:
                        file_data['container_file'] = file_content
        else:
            structure_model_obj[0]['json']['file'].append(file_obj)
        update_one_document(structure_model_obj[0], 'structures', dbname)
        # print(structure_model_obj)
        data = {'object_id': structure_model_obj[0]['_id'], 'name': file_name}
    else:
        # file_obj was already built above with the same fields; just collect it.
        file_content_list.append(file_obj)

        container_json = {
            'name': container_name,
            'containerId': container_id,
            'file': file_content_list
        }
        structure_model_obj = {
            'checksum': '',
            'collection': 'structures',
            'container': container_name,
            'name': 'file_upload',
            'timestamp': int(datetime.datetime.now().timestamp() * 1000),
            'type': 'others',
            'json': container_json
        }
        docId = insert_one_document(structure_model_obj,
                                    structure_model_obj['collection'], dbname,
                                    False)
        # print(docId)
        data = {'object_id': ObjectId(docId), 'name': file_name}

    save_container_object(container_name, 'others', data, dbname)
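A usage sketch, matching how populate_json_files above calls this for .rego files (values hypothetical):

with open('policy.rego') as f:
    save_container_to_db(container_name='container1',
                         container_id='container-id-1',
                         file_name='policy.rego',
                         content_type='application/octet-stream',
                         file_content=f.read(),
                         dbname='validator')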