Example #1
    def populate_all_template_snapshot(self):
        """
        Crawler function that populates all the files located at the given paths and returns the updated `snapshot_data`.
        """
        root_dir_path = get_field_value(self.connector_data, 'folderPath')
        if not root_dir_path:
            root_dir_path = self.repopath

        self.paths = get_field_value(self.node, 'paths')

        if self.paths and isinstance(self.paths, list):
            count = 0
            for path in self.paths:
                self.dir_path = str('%s/%s' % (root_dir_path, path)).replace(
                    '//', '/')
                if exists_dir(self.dir_path):
                    path = path.rstrip("/")
                    count = self.populate_sub_directory_snapshot(
                        path, self.dir_path, "", self.snapshot, self.dbname,
                        self.node, self.snapshot_data, count)
                else:
                    logger.error("Invalid path : directory does not exist : " +
                                 self.dir_path)
        else:
            logger.error(
                "\t\tERROR: Invalid json : `paths` is not a list or does not exist in the master snapshot"
            )

        return self.snapshot_data
Example #2
 def populate_snapshots(self, snapshot_json_data):
     """
     Every snapshot should have a collection of nodes to be populated.
     Each node in the snapshot's nodes list shall have an id unique within this
     container so as not to clash with nodes of other snapshots.
     """
     snapshot_data = {}
     snapshots = get_field_value(snapshot_json_data, 'snapshots')
     if not snapshots:
         logger.error("Json Snapshot does not contain snapshots, next!...")
         return snapshot_data
     for snapshot in snapshots:
         connector_data = self.get_structure_data(snapshot)
         snapshot_type = get_field_value(connector_data, "type")
         if snapshot_type and snapshot_type in self.snapshot_fns:
             if 'nodes' not in snapshot or not snapshot['nodes']:
                 logger.error("No nodes in snapshot to be backed up!...")
                 return snapshot_data
             if snapshot_type in ('azure', 'filesystem'):
                 current_data = self.snapshot_fns[snapshot_type](
                     snapshot, self.container)
             else:
                 current_data = self.snapshot_fns[snapshot_type](snapshot,
                                                                 self)
             logger.info('Snapshot: %s', current_data)
             snapshot_data.update(current_data)
     return snapshot_data
Example #3
def populate_all_arm_snapshot(snapshot, dbname, sub_data, node, repopath,
                              snapshot_data):
    """
    Populate all snapshots by running the arm command
    """
    root_dir_path = get_field_value(sub_data, 'folderPath')
    if not root_dir_path:
        root_dir_path = repopath

    location = get_field_value(node, 'location')
    paths = get_field_value(node, 'paths')
    if paths and isinstance(paths, list):
        count = 0
        for path in paths:
            count += 1
            dir_path = str('%s/%s' % (root_dir_path, path)).replace('//', '/')
            if exists_dir(dir_path):
                list_of_file = os.listdir(dir_path)
                for entry in list_of_file:
                    populate_sub_directory_snapshot(dir_path, entry, snapshot,
                                                    dbname, node,
                                                    snapshot_data)
            else:
                logger.error("Invalid json : directory does not exist : " +
                             dir_path)
    else:
        logger.error(
            "Invalid json : `paths` field is missing for 'arm' node type or it is not a list"
        )

    return
Example #4
def _get_aws_client_data_from_node(node,
                                   default_client=None,
                                   default_region=None):
    """
    Fetches client name and region from ARN, then from the node, 
    then from the connector.
    """
    aws_region = client_str = None
    arn_str = get_field_value(node, 'arn')
    if arn_str:
        arn_obj = arnparse(arn_str)
        client_str = arn_obj.service
        aws_region = arn_obj.region
    if not client_str:
        client_str = get_field_value(node, 'client')
    if not client_str:
        logger.info(
            "No client type provided in snapshot, using client type from connector"
        )
        client_str = default_client
    if not aws_region:
        aws_region = get_field_value(node, 'region')
    if not aws_region:
        logger.info(
            "No region provided in snapshot, using region from connector")
        aws_region = default_region
    return client_str, aws_region
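A hypothetical usage of the resolution order above (ARN first, then node fields, then connector defaults); the node contents and default values here are made up, and the function is assumed to be in scope:

node = {'arn': 'arn:aws:ec2:us-east-1::instance/i-0abcd1234'}
client_str, aws_region = _get_aws_client_data_from_node(
    node, default_client='s3', default_region='us-west-2')
# The ARN wins over both defaults: client_str == 'ec2', aws_region == 'us-east-1'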
Example #5
def generate_snapshot(snapshot_json_data, snapshot_file_data):
    """
    Checks if the snapshot is a master snapshot file.
    """
    if snapshot_json_data:
        snapshot_type = get_field_value(snapshot_json_data, 'fileType')
        if snapshot_type and snapshot_type == 'masterSnapshot':
            snapshots = get_field_value(snapshot_json_data, 'snapshots')
            if snapshots:
                for snapshot in snapshots:
                    nodes = get_field_value(snapshot, 'nodes')
                    if nodes:
                        new_nodes = []
                        for node in nodes:
                            mastersnapshotId = get_field_value(
                                node, 'masterSnapshotId')
                            if mastersnapshotId and mastersnapshotId in snapshot_file_data and \
                                    isinstance(snapshot_file_data[mastersnapshotId], list):
                                for sid_data in snapshot_file_data[
                                        mastersnapshotId]:
                                    structure = sid_data.pop('structure', None)
                                    if structure and structure == 'aws':
                                        newnode = {}
                                    else:
                                        newnode = copy.deepcopy(node)
                                    newnode.update(sid_data)
                                    new_nodes.append(newnode)
                        snapshot['nodes'] = new_nodes
            snapshot_json_data["fileType"] = "snapshot"
Example #6
 def handle_params(self, params_expr):
     ex_params = None
     exmatch = re.match(r'^(\(.*\))(.*)', params_expr, re.I)
     if exmatch:
         field, ex_params = exmatch.groups()
         val = field[1:-1].strip().replace("'", "")
     else:
         val = params_expr.strip().replace("'", "")
     if val in self.gparams:
         if 'value' in self.gparams[val]:
             if ex_params:
                 return True, get_field_value(self.gparams[val]['value'], ex_params)
             return True, self.gparams[val]['value']
         elif 'defaultValue' in self.gparams[val]:
             if ex_params:
                 return True, get_field_value(self.gparams[val]['defaultValue'], ex_params)
             return True, self.gparams[val]['defaultValue']
     else:
         logger.warning("%s does not exist" % val)
     return True, val
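The regex above splits an expression of the form (param)remainder before the parameter lookup; a self-contained illustration with a made-up expression:

import re

exmatch = re.match(r'^(\(.*\))(.*)', "('storageName').sku.tier", re.I)
field, ex_params = exmatch.groups()          # "('storageName')", ".sku.tier"
val = field[1:-1].strip().replace("'", "")   # 'storageName'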
Example #7
def run_json_validation_tests(test_json_data,
                              container,
                              filesystem=True,
                              snapshot_status=None):
    resultset = []
    if not test_json_data:
        return resultset
    if not snapshot_status:
        snapshot_status = {}
    logger.debug(json.dumps(test_json_data, indent=2))
    testsets = get_field_value(test_json_data, 'testSet')
    if not testsets or not isinstance(testsets, list):
        logger.info("Test json does not contain testset, next!...")
        return resultset
    dbname = config_value(DATABASE, DBNAME)
    # Populate the snapshotId => collection for the snapshot.json in the test file.
    collection_data = get_snapshot_id_to_collection_dict(
        test_json_data['snapshot'], container, dbname, filesystem)
    if test_json_data['snapshot'] in snapshot_status:
        current_snapshot_status = snapshot_status[test_json_data['snapshot']]
    else:
        current_snapshot_status = {}
    for testset in testsets:
        version = get_field_value(testset, 'version')
        testcases = get_field_value(testset, 'cases')
        if not testcases or not isinstance(testcases, list):
            logger.info("No testcases in testSet!...")
            continue
        for testcase in testcases:
            result_val = run_validation_test(version, container, dbname,
                                             collection_data, testcase)
            resultset.append(result_val)
    return resultset
Example #8
def get_google_client_data(google_data, snapshot_user, node_type, project_id):
    """
    Generate Google Service Account credentials object from the google structure file.
    """
    credentials = None
    found = False
    if google_data and snapshot_user:
        projects = get_field_value(google_data, "projects")
        for project in projects or []:
            structure_project_id = get_field_value(project, 'project-id')
            if structure_project_id == project_id:
                users = get_field_value(project, 'users')
                if users:
                    for user in users:
                        user_name = get_field_value(user, 'name')
                        if user_name == snapshot_user:
                            found = True
                            gce = generate_gce(google_data, project, user)
                            if gce:
                                save_json_to_file(gce, '/tmp/gce.json')
                                logger.info("Creating credential object")
                                scopes = ['https://www.googleapis.com/auth/compute', "https://www.googleapis.com/auth/cloud-platform"]
                                credentials = ServiceAccountCredentials.from_json_keyfile_name('/tmp/gce.json', scopes)
                            break
            if found:
                break
    return credentials
Example #9
def test_put_value():
    data_new = copy.deepcopy(data_dict)
    put_value(data_new, 'a.b', 1)
    assert 1 == get_field_value(data_new, 'a.b')
    put_value(data_new, '.a.b', 2)
    assert 2 == get_field_value(data_new, 'a.b')
    put_value(data_new, 'm.n.o', {'a': {'b': 'c'}})
    assert {'a': {'b': 'c'}} == get_field_value(data_new, 'm.n.o')
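A minimal sketch of put_value consistent with the assertions above (the real helper may handle more syntax, e.g. list indexes); note the leading '.' is simply discarded:

def put_value_sketch(data, dotted_key, value):
    # Walk/create nested dicts for every component except the last, then assign.
    parts = [p for p in dotted_key.split('.') if p]
    for part in parts[:-1]:
        data = data.setdefault(part, {})
    data[parts[-1]] = value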
Example #10
def _get_repo_path(connector, snapshot):
    if connector and isinstance(connector, dict):
        git_provider = get_field_value(connector, "gitProvider")
        folder_path = get_field_value(connector, "folderPath")
        if git_provider:
            return git_clone_dir(connector)
        elif folder_path:
            return _local_file_directory(connector, snapshot)
    logger.error("Invalid connector or missing folderPath/gitProvider")
    return None, None
Example #11
def generate_gce(google_data, project, user):
    """
    Generate client secret json from the google data
    """
    gce = {
        "type": get_field_value(user, "type"),
        "project_id": get_field_value(project, "project-id"),
        "private_key_id": get_field_value(user, "private_key_id"),
        "client_email": get_field_value(user, "client_email"),
        "client_id": get_field_value(user, "client_id"),
        "auth_uri": get_field_value(google_data, "auth_uri"),
        "token_uri": get_field_value(google_data, "token_uri"),
        "auth_provider_x509_cert_url": get_field_value(google_data, "auth_provider_x509_cert_url"),
        "client_x509_cert_url": get_field_value(google_data, "client_x509_cert_url"),
    }
    gce = get_private_key(gce)
    return gce
Example #12
def get_all_nodes(awsclient, node, snapshot, connector):
    """ Fetch all the nodes from the cloned git repository in the given path."""
    db_records = []
    arn_string = "arn:aws:%s:%s::%s"
    collection = node['collection'] if 'collection' in node else COLLECTION
    snapshot_source = get_field_value(snapshot, 'source')
    parts = snapshot_source.split('.')
    d_record = {
        "structure": "aws",
        "reference": "",
        "source": parts[0],
        "path": '',
        "timestamp": int(time.time() * 1000),
        "queryuser": "",
        "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
        "node": node,
        "snapshotId": None,
        "masterSnapshotId": node['masterSnapshotId'],
        "collection": collection.replace('.', '').lower(),
        "json": {}
    }
    list_function_name = get_field_value(node, 'listMethod')
    if list_function_name:
        list_function = getattr(awsclient, list_function_name, None)
        if list_function and callable(list_function):
            try:
                list_kwargs = _get_list_function_kwargs(
                    awsclient.meta._service_model.service_name,
                    list_function_name)
                response = list_function(**list_kwargs)
                list_of_resources = _get_resources_from_list_function(
                    response, list_function_name)
            except Exception as ex:
                logger.error('Failed to list resources: %s', ex)
                list_of_resources = []
            detail_methods = get_field_value(node, 'detailMethods') or []
            for each_resource in list_of_resources:
                type_list = []
                if "arn:" in each_resource:
                    resource_arn = each_resource
                else:
                    resource_arn = arn_string % (
                        awsclient.meta._service_model.service_name,
                        awsclient.meta.region_name, each_resource)
                for each_method_str in detail_methods:
                    each_method = getattr(awsclient, each_method_str, None)
                    if each_method and callable(each_method):
                        type_list.append(each_method_str)
                db_record = copy.deepcopy(d_record)
                db_record['detailMethods'] = type_list
                db_record['arn'] = resource_arn
                db_records.append(db_record)

    return db_records
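The ARN fallback above, traced with hypothetical values for a resource id that is not already an ARN:

arn_string = "arn:aws:%s:%s::%s"
resource_arn = arn_string % ("ec2", "us-east-1", "i-0abcd1234")
# 'arn:aws:ec2:us-east-1::i-0abcd1234'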
Example #13
def get_aws_client_data(aws_data, snapshot_user, account_id):
    """
    AWS client information as required by the Boto client, viz access_key
    access_secret, AWS command type like EC2, S3 etc and region
    The access_secret is either read from structure json or env variable or keyvault
    """
    accesskey = None
    secret_access = None
    region = None
    client_str = None
    if aws_data and snapshot_user:
        accounts = get_field_value(aws_data, "accounts")
        if accounts:
            found = False
            for account in accounts:
                if account_id == get_field_value(account, "account-id"):
                    users = get_field_value(account, "users")
                    if users:
                        for user in users:
                            if snapshot_user == get_field_value(user, "name"):
                                found = True
                                accesskey = get_field_value(user, 'access-key')
                                secret_access = get_field_value(
                                    user, 'secret-access')
                                region = get_field_value(user, 'region')
                                client_str = get_field_value(user, 'client')
                                if client_str and not _validate_client_name(
                                        client_str):
                                    logger.error("Invalid Client Name")
                                break
                if found:
                    break

    return accesskey, secret_access, region, client_str
Example #14
def _local_file_directory(connector, snapshot):
    final_path, repopath = None, None
    connector_user = get_field_value(connector, 'username')
    snapshot_user = get_field_value(snapshot, 'testUser')
    if snapshot_user == connector_user:
        folder_path = get_field_value(connector, 'folderPath')
        logger.info("Folder path: %s", folder_path)
        if exists_dir(folder_path):
            final_path = folder_path
        else:
            logger.error("Given folder path is not a directory")
    else:
        logger.error("Connector and snapshot user do not match.")
    return repopath, final_path
Example #15
def get_node(credentials, node, snapshot_source, snapshot):
    """
    Fetch node from google using connection. In this case using google client API's
    functions.
    """
    collection = node['collection'] if 'collection' in node else COLLECTION
    parts = snapshot_source.split('.')
    project_id = get_field_value_with_default(snapshot, 'project-id', "")
    path = get_field_value_with_default(node, 'path', "")
    zone = re.findall(r"(?<=zones\/)[a-zA-Z0-9\-]*(?=\/)", path)
    db_record = {
        "structure": "google",
        "error": None,
        "reference": project_id,
        "source": parts[0],
        "path": path,
        "timestamp": int(time.time() * 1000),
        "queryuser": get_field_value(snapshot, 'testUser'),
        "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
        "node": node,
        "snapshotId": node['snapshotId'],
        "collection": collection.replace('.', '').lower(),
        "region" : zone[0] if zone else "",
        "json": {}  # Refactor when node is absent it should None, when empty object put it as {}
    }

    try:
        access_token = credentials.get_access_token().access_token
        header = {
            "Authorization" : ("Bearer %s" % access_token)
        }

        node_type = node['type'] if node and 'type' in node else ""
        base_node_type_list = node_type.split("/")
        if len(base_node_type_list) > 1:
            base_node_type = base_node_type_list[0]
        else:
            logger.error("Invalid node type %s", node_type)
            return db_record
        
        base_url = "%s%s" % (base_node_type, ".googleapis.com")
        request_url = "https://%s/%s" % (base_url, path)
        logger.info("Invoke request for get snapshot: %s", request_url)
        status, data = http_get_request(request_url, header)
        logger.info('Get snapshot status: %s', status)
        
        if isinstance(status, int) and status == 200:
            if data:
                db_record['json'] = data
                checksum = get_checksum(data)
                if checksum:
                    db_record['checksum'] = checksum
        else:
            logger.error("Get snapshot returned invalid status: %s", status)
            db_record['error'] = ("Get snapshot returned invalid status: %s" % status)
    except Exception as ex:
        logger.error('Failed to populate the snapshot : %s', ex)
        db_record['error'] = 'Failed to populate the snapshot: %s' % ex
    
    return db_record
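How the request URL is assembled above, traced with a hypothetical node type and path:

node_type = "compute/compute.instances.get"
base_node_type = node_type.split("/")[0]           # 'compute'
base_url = "%s%s" % (base_node_type, ".googleapis.com")
path = "compute/v1/projects/p1/zones/us-east1-b/instances/vm-1"
request_url = "https://%s/%s" % (base_url, path)
# 'https://compute.googleapis.com/compute/v1/projects/p1/zones/us-east1-b/instances/vm-1'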
Example #16
def create_kube_apiserver_instance(snapshot, node):
    """
    Create a kubernetes apiserver client instance used to communicate with,
    and fetch responses from, the kubernetes apiserver.
    """
    snapshot_serviceAccount = get_field_value(snapshot, 'serviceAccount')
    snapshot_namespace = get_field_value(snapshot, 'namespace')
    node_type = get_field_value(node, 'type')
    snapshot_source = get_field_value(snapshot, 'source')
    kubernetes_structure_data = get_kubernetes_structure_data(snapshot_source)
    service_account_secret = get_client_secret(kubernetes_structure_data, snapshot_serviceAccount, snapshot_namespace)

    cluster_url = get_field_value(kubernetes_structure_data, 'clusterUrl')
    api_instance = create_kube_apiserver_instance_client(cluster_url, service_account_secret, node_type)
    return api_instance
Example #17
def get_google_call_function(node):
    """Get the callable for the type of compute resource."""
    fn_str_list = None
    kwargs = {}
    if node and 'type' in node and node['type']:
        fn_str = get_field_value(node, 'type')
        if fn_str:
            fn_str_list = fn_str.split(".")
        kwargs = get_call_kwargs(node)
    return fn_str_list, kwargs
Example #18
def get_all_nodes(repopath, node, snapshot, ref, connector):
    """ Fetch all the nodes from the cloned git repository in the given path."""
    db_records = []
    collection = node['collection'] if 'collection' in node else COLLECTION
    given_type = get_field_value(connector, "type")
    base_path = get_field_value_with_default(connector, "folderPath", "")
    snapshot_source = get_field_value(snapshot, 'source')
    parts = snapshot_source.split('.')
    d_record = {
        "structure": given_type,
        "reference": ref if not base_path else "",
        "source": parts[0],
        "path": '',
        "timestamp": int(time.time() * 1000),
        "queryuser": "",
        "checksum": hashlib.md5("{}".encode('utf-8')).hexdigest(),
        "node": node,
        "snapshotId": None,
        "masterSnapshotId": node['masterSnapshotId'],
        "collection": collection.replace('.', '').lower(),
        "json": {}
    }
    node_type = node['type'] if 'type' in node and node['type'] else 'json'
    json_path = '%s/%s' % (repopath, node['path'])
    file_path = json_path.replace('//', '/')
    logger.info('Dir: %s', file_path)
    if exists_dir(file_path):
        count = 0
        for filename in glob.glob('%s/*.json' % file_path.replace('//', '/')):
            parts = filename.rsplit('/', 1)
            path = '%s/%s' % (node['path'], parts[-1])
            json_data = convert_to_json(filename, node_type)
            logger.info('type: %s, json:%s', node_type, json_data)
            if json_data:
                db_record = copy.deepcopy(d_record)
                db_record['snapshotId'] = '%s%s' % (node['masterSnapshotId'],
                                                    str(count))
                db_record['path'] = path.replace('//', '/')
                db_record['json'] = json_data
                data_str = json.dumps(json_data)
                db_record['checksum'] = hashlib.md5(
                    data_str.encode('utf-8')).hexdigest()
                db_records.append(db_record)
                count += 1
    else:
        logger.info('A valid snapshot directory is required but does not exist!')
    return db_records
Example #19
def set_snapshot_data(node, items, snapshot_data):
    if node['masterSnapshotId'] not in snapshot_data or not isinstance(snapshot_data[node['masterSnapshotId']], list):
        snapshot_data[node['masterSnapshotId']] = []

    # create the node type for sub resources
    node_type = get_field_value(node, "type")
    node_type_list = node_type.split(".")
    resource_node_type = node_type
    if len(node_type_list) > 1:
        del node_type_list[-1]
        node_type_list.append("get")
        resource_node_type = ".".join(node_type_list)

    count = 0
    resource_items = []
    if isinstance(items, dict):
        for zone, resource in items.items():
            if 'selfLink' in resource:
                resource_items.append(resource)
            else:
                resource_type = node_type.split("/")[1].split(".")[-2]
                if resource_type in resource and isinstance(resource[resource_type], list):
                    if len(resource[resource_type]) > 0 and 'selfLink' in resource[resource_type][0]:
                        resource_items += resource[resource_type]
    else:
        resource_items = items

    for item in resource_items:
        count += 1
        path_list = item['selfLink'].split("https://")
        path_list = path_list[1].split('/')
        path = "/".join(path_list[1:])

        found_old_record = False
        for masterSnapshotId, snapshot_list in snapshot_data.items():
            old_record = None
            if isinstance(snapshot_list, list):
                for record in snapshot_list:
                    if record["path"] == path:
                        old_record = record

                if old_record:
                    found_old_record = True
                    if node['masterSnapshotId'] not in old_record['masterSnapshotId']:
                        old_record['masterSnapshotId'].append(
                            node['masterSnapshotId'])

        if not found_old_record:
            snapshot_data[node['masterSnapshotId']].append(
                {
                    "masterSnapshotId" : [node['masterSnapshotId']],
                    "snapshotId": '%s%s' % (node['masterSnapshotId'], str(count)),
                    "type": resource_node_type,
                    "collection": node['collection'],
                    "path": path,
                    "status" : "active",
                    "validate" : node['validate'] if 'validate' in node else True
                })
    return snapshot_data
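How a resource's selfLink is reduced to the stored relative path in the loop above (the sample link is hypothetical):

self_link = "https://compute.googleapis.com/compute/v1/projects/p1/zones/us-east1-b/instances/vm-1"
path_list = self_link.split("https://")[1].split('/')
path = "/".join(path_list[1:])
# 'compute/v1/projects/p1/zones/us-east1-b/instances/vm-1'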
Example #20
def get_google_call_function_for_crawler(node, project_id):
    """Get the callable for the type of compute resource."""
    fn_str_list = None
    kwargs = None
    if node and 'type' in node and node['type']:
        fn_str_list = get_field_value(node, 'type').split(".")
        kwargs = get_call_kwargs_for_crawler(node, project_id)
    return fn_str_list, kwargs
Example #21
def get_list_namespaced_service_account(snapshot, node):
    snapshot_namespaces = get_field_value(snapshot, 'namespace')
    service_account_items = []
    api_instance = create_kube_apiserver_instance(snapshot, node)
    for snapshot_namespace in snapshot_namespaces:
        api_response = api_instance.list_namespaced_service_account(namespace=snapshot_namespace)
        api_response_dict = todict(api_response)
        api_response_dict_items = get_field_value(api_response_dict, 'items')
        for api_response_dict_item in api_response_dict_items:
            service_account_name = get_field_value(api_response_dict_item, 'metadata.name')
            service_account_path = "api/v1/namespaces/%s/serviceaccounts/%s" % (snapshot_namespace, service_account_name)
            service_account_items.append({
                'namespace': snapshot_namespace,
                'paths': [
                    service_account_path
                ]
            })
    return service_account_items
Example #22
def get_list_cluster_role_binding(snapshot, node):
    snapshot_namespaces = get_field_value(snapshot, 'namespace')
    cluster_role_binding_items = []
    api_instance = create_kube_apiserver_instance(snapshot, node)
    for snapshot_namespace in snapshot_namespaces:
        # list_cluster_role_binding matches the clusterrolebindings path built below
        api_response = api_instance.list_cluster_role_binding()
        api_response_dict = todict(api_response)
        api_response_dict_items = get_field_value(api_response_dict, 'items')
        for api_response_dict_item in api_response_dict_items:
            cluster_role_binding_name = get_field_value(api_response_dict_item, 'metadata.name')
            cluster_role_binding_path = "apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings/%s" % (cluster_role_binding_name)
            cluster_role_binding_items.append({
                'namespace': snapshot_namespace,
                'paths': [
                    cluster_role_binding_path
                ]
            })
    return cluster_role_binding_items
Example #23
def get_list_namespaced_pod_security_policy(snapshot, node):
    snapshot_namespaces = get_field_value(snapshot, 'namespace')
    pod_security_policy_items = []
    api_instance = create_kube_apiserver_instance(snapshot, node)
    for snapshot_namespace in snapshot_namespaces:
        api_response = api_instance.list_pod_security_policy()
        api_response_dict = todict(api_response)
        api_response_dict_items = get_field_value(api_response_dict, 'items')
        for api_response_dict_item in api_response_dict_items:
            pod_security_policy_name = get_field_value(api_response_dict_item, 'metadata.name')
            pod_security_policy_path = "apis/policy/v1beta1/podsecuritypolicies/%s" % (pod_security_policy_name)
            pod_security_policy_items.append({
                'namespace': snapshot_namespace,
                'paths': [
                    pod_security_policy_path
                ]
            })
    return pod_security_policy_items
Example #24
def get_list_namespaced_network_policy(snapshot, node):
    snapshot_namespaces = get_field_value(snapshot, 'namespace')
    network_policy_items = []
    api_instance = create_kube_apiserver_instance(snapshot, node)
    for snapshot_namespace in snapshot_namespaces:
        api_response = api_instance.list_namespaced_network_policy(namespace=snapshot_namespace)
        api_response_dict = todict(api_response)
        api_response_dict_items = get_field_value(api_response_dict, 'items')
        for api_response_dict_item in api_response_dict_items:
            network_policy_name = get_field_value(api_response_dict_item, 'metadata.name')
            network_policy_path = "apis/networking.k8s.io/v1/namespaces/%s/networkpolicies/%s" % (snapshot_namespace, network_policy_name)
            network_policy_items.append({
                'namespace': snapshot_namespace,
                'paths': [
                    network_policy_path
                ]
            })
    return network_policy_items
Example #25
def test_get_field_value():
    assert None == get_field_value(data_dict, None)
    assert None == get_field_value(None, 'c.d')
    assert None == get_field_value(data_dict, 'c.d.e')
    assert 'b' == get_field_value(data_dict, 'a')
    assert 'e' == get_field_value(data_dict, 'c.d')
    assert 1 == get_field_value(data_dict, 'f.g.h')
    assert {'h': 1} == get_field_value(data_dict, 'f.g')
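A minimal sketch of get_field_value consistent with the assertions above (the real helper may also support list indexes and other syntax):

def get_field_value_sketch(data, dotted_key):
    # Missing data or key yields None; otherwise walk the dotted path.
    if data is None or not dotted_key:
        return None
    for part in dotted_key.split('.'):
        if not isinstance(data, dict) or part not in data:
            return None
        data = data[part]
    return data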
Example #26
def get_client_secret(kubernetes_structure_data, snapshot_serviceAccount, snapshot_namespace):
    """
    get_client_secret takes the service account named in the master snapshot and
    compares it against the service accounts listed in the kubernetes structure
    file, returning that account's secret if it exists there. If the secret is
    not in the structure file, the vault is checked using the account's id.
    The returned secret string is used to connect to the kubernetes cluster.
    """
    global Cache_namespace, Cache_secret
    if snapshot_namespace == Cache_namespace:
        return Cache_secret

    namespaces = get_field_value(kubernetes_structure_data, 'namespaces')
    service_account_secret = ""
    for namespace in namespaces:
        service_accounts = get_field_value(namespace, 'serviceAccounts')
        for service_account in service_accounts:
            if snapshot_serviceAccount == service_account['name'] and namespace['namespace'] in snapshot_namespace:
                service_account_secret = get_field_value(service_account, 'secret')
                if service_account_secret is not None:
                    Cache_secret = service_account_secret
                    Cache_namespace = snapshot_namespace
                    return service_account_secret
                else:
                    service_account_secret = get_vault_data(service_account['id'])
                    if service_account_secret is not None:
                        Cache_secret = service_account_secret
                        Cache_namespace = snapshot_namespace

    if service_account_secret == "":
        logger.error("\t\t ERROR : cannot find secret for service account : %s" % (snapshot_serviceAccount))

    return service_account_secret
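A hypothetical shape of the kubernetes structure data the lookup above walks; the field names mirror the accesses in the code, the values are made up:

kubernetes_structure_data = {
    "clusterUrl": "https://10.0.0.1:6443",
    "namespaces": [
        {
            "namespace": "default",
            "serviceAccounts": [
                {"name": "validator", "id": "vault-key-id", "secret": "<service-account-token>"}
            ]
        }
    ]
}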
Example #27
def get_list_namespaced_pods(snapshot, node):
    snapshot_namespaces = get_field_value(snapshot, 'namespace')
    pod_items = []
    api_instance = create_kube_apiserver_instance(snapshot, node)
    for snapshot_namespace in snapshot_namespaces:
        api_response = api_instance.list_namespaced_pod(namespace=snapshot_namespace)
        api_response_dict = todict(api_response)
        api_response_dict_items = get_field_value(api_response_dict, 'items')
        for api_response_dict_item in api_response_dict_items:
            pod_name = get_field_value(api_response_dict_item, 'metadata.name')
            pod_path = "api/v1/namespaces/%s/pods/%s" % (snapshot_namespace, pod_name)
            pod_items.append({
                'namespace': snapshot_namespace,
                'paths': [
                    pod_path
                ]
            })
    return pod_items
Example #28
def get_snapshot_id_to_collection_dict(snapshot_file,
                                       container,
                                       dbname,
                                       filesystem=True):
    snapshot_data = {}
    snapshot_json_data = {}
    if filesystem:
        file_name = '%s.json' % snapshot_file if snapshot_file and not \
            snapshot_file.endswith('.json') else snapshot_file
        snapshot_file = '%s/%s/%s' % (get_test_json_dir(), container,
                                      file_name)
        snapshot_json_data = json_from_file(snapshot_file)
    else:
        parts = snapshot_file.split('.')
        collection = config_value(DATABASE, collectiontypes[SNAPSHOT])
        qry = {'container': container, 'name': parts[0]}
        sort = [sort_field('timestamp', False)]
        docs = get_documents(collection,
                             dbname=dbname,
                             sort=sort,
                             query=qry,
                             limit=1)
        logger.info('Number of Snapshot Documents: %s', len(docs))
        if docs:
            snapshot_json_data = docs[0]['json']
    snapshots = get_field_value(snapshot_json_data, 'snapshots')
    if not snapshots:
        logger.info("Snapshot does not contain snapshots...")
        return snapshot_data
    for snapshot in snapshots:
        nodes = get_field_value(snapshot, 'nodes')
        if not nodes:
            logger.info("No nodes in snapshot, continuing to next!...")
            continue
        for node in nodes:
            sid = get_field_value(node, 'snapshotId')
            coll = node['collection'] if 'collection' in node else COLLECTION
            collection = coll.replace('.', '').lower()
            snapshot_data[sid] = collection
            if get_dbtests():
                create_indexes(collection, dbname,
                               [('timestamp', pymongo.TEXT)])
    return snapshot_data
Example #29
def populate_snapshot(snapshot, container):
    """
    Every snapshot should have a collection of nodes to be populated.
    Each node in the snapshot's nodes list shall have an id unique within this
    container so as not to clash with nodes of other snapshots.
    """
    snapshot_data = {}
    snapshot_type = None
    snapshot_source = get_field_value(snapshot, "source")
    connector_data = get_custom_data(snapshot_source)
    if connector_data:
        snapshot_type = get_field_value(connector_data, "type")
    if snapshot_type and snapshot_type in snapshot_fns:
        if 'nodes' not in snapshot or not snapshot['nodes']:
            logger.error("No nodes in snapshot to be backed up!...")
            return snapshot_data
        snapshot_data = snapshot_fns[snapshot_type](snapshot, container)
    logger.info('Snapshot: %s', snapshot_data)
    return snapshot_data
Example #30
 def process_rego_test_case(self):
     inputjson = {}
     result = False
     opa_exe = opa_binary()
     if not opa_exe:
         return result
     rule_expr = get_field_value(self.testcase, 'eval')
     if not rule_expr:
         rule_expr = 'data.rule.rulepass'
     if len(self.testcase['snapshotId']) == 1:
         sid = self.testcase['snapshotId'][0]
         inputjson = self.get_snaphotid_doc(sid)
     else:
         ms_id = dict(
             zip(self.testcase['snapshotId'],
                 self.testcase['masterSnapshotId']))
         for sid in self.testcase['snapshotId']:
             inputjson.update({ms_id[sid]: self.get_snaphotid_doc(sid)})
     if inputjson:
         save_json_to_file(inputjson, '/tmp/input.json')
         rego_rule = self.rule
         rego_match = re.match(r'^file\((.*)\)$', rego_rule, re.I)
         if rego_match:
             rego_file = self.rego_rule_filename(rego_match.groups()[0],
                                                 self.container)
         else:
             rego_txt = [
                 "package rule", "default rulepass = false",
                 "rulepass = true{",
                 "   %s" % rego_rule, "}", ""
             ]
             rego_file = '/tmp/input.rego'
             with open(rego_file, 'w') as f:
                 f.write('\n'.join(rego_txt))
         if rego_file:
             os.system(
                 '%s eval -i /tmp/input.json -d %s "%s" > /tmp/a.json' %
                 (opa_exe, rego_file, rule_expr))
             resultval = json_from_file('/tmp/a.json')
             if resultval:
                 resultbool = resultval['result'][0]['expressions'][0]['value']
                 if resultbool:
                     result = parsebool(resultbool)
         else:
             result = False
     return result
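The shape of the opa eval output consumed from /tmp/a.json above, with a hypothetical result value:

resultval = {
    "result": [
        {"expressions": [
            {"value": True, "text": "data.rule.rulepass"}
        ]}
    ]
}
resultbool = resultval['result'][0]['expressions'][0]['value']  # True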