Code example #1
def get_google_client_data(google_data, snapshot_user, node_type, project_id):
    """
    Generate Google Service Account credentials object from the google structure file.
    """
    credentials = None
    found = False
    if google_data and snapshot_user:
        projects = get_field_value(google_data, "projects")
        for project in projects:
            structure_project_id = get_field_value(project, 'project-id')
            if structure_project_id == project_id:
                users = get_field_value(project, 'users')
                if users:
                    for user in users:
                        user_name = get_field_value(user, 'name')
                        if user_name == snapshot_user:
                            found = True
                            gce = generate_gce(google_data, project, user)
                            if gce:
                                save_json_to_file(gce, '/tmp/gce.json')
                                logger.info("Creating credential object")
                                scopes = ['https://www.googleapis.com/auth/compute', "https://www.googleapis.com/auth/cloud-platform"]
                                credentials = ServiceAccountCredentials.from_json_keyfile_name('/tmp/gce.json', scopes)
                                # service_name = get_service_name(node_type)
                                # compute = discovery.build(service_name, 'v1', credentials=credentials, cache_discovery=False)
                            break
            if found:
                break
    return credentials
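
Nearly every example on this page leans on a get_field_value helper. A minimal sketch of how such a dotted-path lookup could behave, assuming it returns None whenever a segment is missing (hypothetical, not the project's actual implementation):

def get_field_value(data, field):
    # Hypothetical sketch: walk a '.'-separated path through nested dicts,
    # returning None as soon as a segment is absent.
    value = data
    for part in field.split('.'):
        if isinstance(value, dict) and part in value:
            value = value[part]
        else:
            return None
    return value

# e.g. get_field_value({'project-id': 'p1'}, 'project-id') == 'p1'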
Code example #2
def generate_snapshots_from_mastersnapshot_file(mastersnapshot_file):
    """
    Load the master snapshot file from the filesystem as a JSON data
    structure and generate all the nodes in it.
    """
    mastersnapshot_file_name = '%s.json' % mastersnapshot_file if mastersnapshot_file and not \
        mastersnapshot_file.endswith('.json') else mastersnapshot_file
    mastersnapshot_json_data = json_from_file(mastersnapshot_file_name)
    if not mastersnapshot_json_data:
        logger.error("masterSnapshot file %s looks to be empty, next!...",
                     mastersnapshot_file)
        return {}, {}

    if "connector" in mastersnapshot_json_data and "remoteFile" in mastersnapshot_json_data and mastersnapshot_json_data[
            "connector"] and mastersnapshot_json_data["remoteFile"]:
        _, pull_response = pull_json_data(mastersnapshot_json_data)
        if not pull_response:
            return {}, {}
    logger.debug(json.dumps(mastersnapshot_json_data, indent=2))
    parts = mastersnapshot_file_name.rsplit('.', 1)
    snapshot_file_name = '%s_gen.%s' % (parts[0], parts[1])
    snapshot_json_data = json_from_file(snapshot_file_name)
    if not snapshot_json_data:
        snapshot_json_data = {}
    snapshot_data = generate_mastersnapshots_from_json(
        mastersnapshot_json_data, snapshot_json_data)
    # save_json_to_file(mastersnapshot_json_data, mastersnapshot_file)
    if exists_file(snapshot_file_name):
        remove_file(snapshot_file_name)

    save_json_to_file(snapshot_json_data, snapshot_file_name)
    return snapshot_data, mastersnapshot_json_data
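
The emptiness checks above assume json_from_file returns a falsy value when the file is absent or unparsable. A minimal sketch under that assumption (hypothetical; the real helper may log and handle more cases):

import json

def json_from_file(json_file):
    # Hypothetical sketch: parsed JSON on success, None when the file is
    # missing, unreadable, or not valid JSON.
    try:
        with open(json_file) as f:
            return json.load(f)
    except (OSError, ValueError):
        return None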
Code example #3
def dump_output_results(results,
                        container,
                        test_file,
                        snapshot,
                        filesystem=True):
    """ Dump the report in the json format for test execution results."""
    od = OrderedDict()
    od["$schema"] = ""
    od["contentVersion"] = "1.0.0.0"
    od["fileType"] = OUTPUT
    od["timestamp"] = int(time.time() * 1000)
    od["snapshot"] = snapshot
    od["container"] = container
    dblog = get_dblogger()
    od["log"] = dblog if dblog else ""
    if filesystem:
        test_file_parts = test_file.rsplit('/', 1)
        od["test"] = test_file_parts[-1]
        output_file = '%s/output-%s' % (test_file_parts[0],
                                        test_file_parts[-1])
        od["results"] = results
        save_json_to_file(od, output_file)
    else:
        od["test"] = test_file
        od["results"] = results
        del od["$schema"]
        doc = json_record(container, OUTPUT, test_file, od)
        dbname = config_value(DATABASE, DBNAME)
        collection = config_value(DATABASE, collectiontypes[OUTPUT])
        insert_one_document(doc, collection, dbname)
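
For the filesystem branch, the report written next to the test file would look roughly like this (illustrative values; assuming the OUTPUT constant renders as "output"):

# output-test.json (illustrative)
# {
#     "$schema": "",
#     "contentVersion": "1.0.0.0",
#     "fileType": "output",
#     "timestamp": 1700000000000,
#     "snapshot": "snapshot",
#     "container": "mycontainer",
#     "log": "",
#     "test": "test.json",
#     "results": [...]
# }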
Code example #4
def convert_terraform_to_json(terraform, output=None):
    if exists_file(terraform):
        if not output:
            parts = terraform.rsplit('.', 1)
            output = '%s.json' % parts[0]
        json_data = convert_to_json(terraform, 'terraform')
        if json_data:
            save_json_to_file(json_data, output)
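
A quick check of the output-name derivation (hypothetical file name):

parts = 'network.tf'.rsplit('.', 1)
output = '%s.json' % parts[0]
assert output == 'network.json'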
Code example #5
    def parse(self):
        """
        Parse the template and return the generated template JSON.
        """
        gen_template_json = self.generate_template_json()
        if self.tosave:
            file_name = os.path.splitext(self.get_template())[0] + '_gen.json'
            save_json_to_file(gen_template_json, file_name)
        return gen_template_json
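
The same _gen naming convention, exercised with a hypothetical template path:

import os

file_name = os.path.splitext('/tmp/vm_template.yaml')[0] + '_gen.json'
assert file_name == '/tmp/vm_template_gen.json'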
Code example #6
    def process_rego_test_case(self):
        inputjson = {}
        result = False
        opa_exe = opa_binary()
        if not opa_exe:
            return result
        rule_expr = get_field_value(self.testcase, 'eval')
        if not rule_expr:
            rule_expr = 'data.rule.rulepass'
        if len(self.testcase['snapshotId']) == 1:
            sid = self.testcase['snapshotId'][0]
            inputjson = self.get_snaphotid_doc(sid)
        else:
            ms_id = dict(
                zip(self.testcase['snapshotId'],
                    self.testcase['masterSnapshotId']))
            for sid in self.testcase['snapshotId']:
                inputjson.update({ms_id[sid]: self.get_snaphotid_doc(sid)})
        if inputjson:
            save_json_to_file(inputjson, '/tmp/input.json')
            rego_rule = self.rule
            rego_match = re.match(r'^file\((.*)\)$', rego_rule, re.I)
            if rego_match:
                # The rule points at a rego file stored with the container.
                rego_file = self.rego_rule_filename(rego_match.groups()[0],
                                                    self.container)
            else:
                # Inline rule: wrap the expression in a minimal rego module.
                rego_txt = [
                    "package rule", "default rulepass = false",
                    "rulepass = true{",
                    "   %s" % rego_rule, "}", ""
                ]
                rego_file = '/tmp/input.rego'
                with open(rego_file, 'w') as f:
                    f.write('\n'.join(rego_txt))
            if rego_file:
                os.system(
                    '%s eval -i /tmp/input.json -d %s "%s" > /tmp/a.json' %
                    (opa_exe, rego_file, rule_expr))
                resultval = json_from_file('/tmp/a.json')
                if resultval:
                    resultbool = resultval['result'][0]['expressions'][0][
                        'value']
                    if resultbool:
                        result = parsebool(resultbool)
            else:
                result = False
        return result
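
The verdict is dug out of opa eval's JSON output. A self-contained demo of that extraction, using a hand-written payload shaped like opa's result document:

import json

# Hand-written sample mimicking `opa eval` stdout for a passing rule.
sample = json.loads(
    '{"result": [{"expressions": [{"value": true, '
    '"text": "data.rule.rulepass"}]}]}')
resultbool = sample['result'][0]['expressions'][0]['value']
assert resultbool is True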
Code example #7
def populate_snapshots_from_file(snapshot_file, container):
    """
    Load the snapshot file from the filesystem as a JSON data structure
    and populate all the nodes in it.
    """
    file_name = '%s.json' % snapshot_file if snapshot_file and not \
        snapshot_file.endswith('.json') else snapshot_file
    snapshot_json_data = json_from_file(file_name)
    if not snapshot_json_data:
        logger.error("Snapshot file %s looks to be empty, next!...", snapshot_file)
        return {}
    logger.debug(json.dumps(snapshot_json_data, indent=2))
    snapshot_data = populate_snapshots_from_json(snapshot_json_data, container)
    save_json_to_file(snapshot_json_data, file_name)
    return snapshot_data
Code example #8
def generate_container_mastersnapshots_filesystem(container):
    """
    Using the mastersnapshot files from the container with storage system as filesystem.
    The path for looking into the container is configured in the config.ini, for the
    default location configuration is $SOLUTIONDIR/realm/validation/<container>
    """
    snapshots_status = {}
    snapshot_dir, snapshot_files = get_container_snapshot_json_files(
        container, mastersnapshot=True)
    if not snapshot_files:
        logger.error("No mastersnapshot files in %s, exiting!...",
                     snapshot_dir)
        return snapshots_status
    # logger.info('\n'.join(snapshot_files))
    snapshots = mastersnapshots_used_in_mastertests_filesystem(container)
    populated = []
    for snapshot_file in snapshot_files:
        logger.info('\tMASTERSNAPSHOT:%s', snapshot_file)
        parts = snapshot_file.rsplit('/', 1)
        if parts[-1] in snapshots:
            if parts[-1] not in populated:
                # Take the snapshot and crawl for the resource types.
                snapshot_file_data, snapshot_json_data = generate_snapshots_from_mastersnapshot_file(
                    snapshot_file)
                file_name = '%s.json' % snapshot_file if snapshot_file and not snapshot_file.endswith(
                    '.json') else snapshot_file
                # snapshot_json_data = json_from_file(file_name)
                generate_snapshot(snapshot_json_data, snapshot_file_data)
                parts = file_name.rsplit('.', 1)
                new_file_name = '%s_gen.%s' % (parts[0], parts[1])
                save_json_to_file(snapshot_json_data, new_file_name)
                populated.append(parts[-1])
                name = parts[-1].replace(
                    '.json', '') if parts[-1].endswith('.json') else parts[-1]
                snapshots_status[name] = snapshot_file_data
        else:
            logger.error("No master testcase found for %s " % parts[-1])
    return snapshots_status
Code example #9
def test_save_json_to_file(create_temp_dir):
    newpath = create_temp_dir()
    fname = '%s/a1.json' % newpath
    assert not os.path.exists(fname)
    # An empty dict is still valid data and creates the file.
    save_json_to_file({}, fname)
    assert os.path.exists(fname)
    os.remove(fname)
    # None is ignored and must not create a file.
    save_json_to_file(None, fname)
    assert not os.path.exists(fname)
    save_json_to_file({'a': 'b'}, fname)
    assert os.path.exists(fname)
    os.remove(fname)
    # A missing parent directory makes the save fail silently.
    fname = '%s/a/a1.json' % newpath
    assert not os.path.exists(fname)
    save_json_to_file({'a': 'b'}, fname)
    assert not os.path.exists(fname)
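
A minimal sketch of a save_json_to_file that would satisfy this test (hypothetical, not the project's implementation): None is ignored, and a missing parent directory makes the write fail silently instead of being created:

import json

def save_json_to_file(indata, outfile):
    # Hypothetical sketch: only write when there is data; swallow
    # filesystem errors so a missing parent directory creates no file.
    if indata is not None:
        try:
            with open(outfile, 'w') as f:
                json.dump(indata, f, indent=2)
        except OSError:
            pass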
Code example #10
def save_currentdata(curr_data):
    """Save the key value rundata for further access, if None store it empty."""
    if not curr_data:
        curr_data = {}
    runctx = framework_currentdata()
    save_json_to_file(curr_data, runctx)
Code example #11
def populate_sub_directory_snapshot(base_dir_path, sub_dir_path, snapshot,
                                    dbname, node, snapshot_data):
    dir_path = str('%s/%s' % (base_dir_path, sub_dir_path)).replace('//', '/')
    if exists_dir(dir_path):
        list_of_file = os.listdir(dir_path)
        template_file_path = ""
        deployment_file_path_list = []

        for entry in list_of_file:
            new_dir_path = ('%s/%s' % (dir_path, entry)).replace('//', '/')
            new_sub_directory_path = ('%s/%s' % (sub_dir_path, entry)).replace(
                '//', '/')
            if exists_dir(new_dir_path):
                populate_sub_directory_snapshot(base_dir_path,
                                                new_sub_directory_path,
                                                snapshot, dbname, node,
                                                snapshot_data)
            elif exists_file(new_dir_path):
                if "json" in entry.split(".")[-1]:
                    json_data = json_from_file(new_dir_path)
                    if json_data and "$schema" in json_data:
                        schema_name = json_data['$schema'].split("/")[-1]
                        if "deploymentTemplate.json" in schema_name:
                            template_file_path = new_sub_directory_path
                        elif "deploymentParameters.json" in schema_name:
                            deployment_file_path_list.append(new_sub_directory_path)

        if template_file_path and deployment_file_path_list:

            location = get_field_value(node, 'location')
            new_deployment_file_path_list = []

            template_file_json_path = str(
                '%s/%s' % (base_dir_path, template_file_path)).replace(
                    '//', '/')
            for deployment_file_path in deployment_file_path_list:
                deployment_file_json_path = str(
                    '%s/%s' % (base_dir_path, deployment_file_path)).replace(
                        '//', '/')

                response = invoke_az_cli("deployment validate --location " +
                                         location + " --template-file " +
                                         template_file_json_path +
                                         " --parameters @" +
                                         deployment_file_json_path)

                if not response['error']:
                    new_deployment_file_path_list.append({
                        "path": deployment_file_path,
                        "status": "active"
                    })
                else:
                    new_deployment_file_path_list.append({
                        "path": deployment_file_path,
                        "status": "inactive"
                    })

            data_record = create_snapshot_record(
                snapshot, new_sub_directory_path, node, template_file_path,
                new_deployment_file_path_list)
            if node['masterSnapshotId'] not in snapshot_data or not isinstance(
                    snapshot_data[node['masterSnapshotId']], list):
                snapshot_data[node['masterSnapshotId']] = []

            snapshot_data[node['masterSnapshotId']] += data_record['snapshots'][0]['nodes']
            if get_dbtests():
                insert_one_document(data_record, node['collection'], dbname)
            else:
                snapshot_file = '%s/%s' % (dir_path, "snapshot.json")
                save_json_to_file(data_record, snapshot_file)
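
The validation shells out to the Azure CLI. A sketch of the command string handed to invoke_az_cli, with hypothetical paths (the helper presumably prefixes it with az; deployment validate, --template-file, and --parameters @file are standard az flags):

# Hypothetical inputs for illustration only.
location = 'eastus'
template_file_json_path = '/repo/arm/azuredeploy.json'
deployment_file_json_path = '/repo/arm/azuredeploy.parameters.json'

cmd = ("deployment validate --location " + location +
       " --template-file " + template_file_json_path +
       " --parameters @" + deployment_file_json_path)
print(cmd)
# deployment validate --location eastus --template-file /repo/arm/azuredeploy.json --parameters @/repo/arm/azuredeploy.parameters.json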
Code example #12
    def process_rego_test_case(self):
        tid = '%d_%s' % (int(time.time() * 1000000), generateid(None))
        results = []
        inputjson = {}
        result = False
        rule_expr = get_field_value(self.testcase, 'eval')
        if not rule_expr:
            rule_expr = get_field_value(self.testcase, 'evals')
            if rule_expr:
                del self.testcase['evals']
        if not rule_expr:
            rule_expr = 'data.rule.rulepass'
        testId = 'MISSING ID'
        if 'testId' in self.testcase:
            testId = self.testcase['testId']
        elif 'masterTestId' in self.testcase:
            testId = self.testcase['masterTestId']
        sid = self.testcase['snapshotId'][0]
        snapshot_doc = self.get_snaphotid_doc(sid)

        # logger.critical('\t\tEVAL: %s', rule_expr)
        opa_exe = opa_binary()
        if not opa_exe:
            logger.error('\t\tERROR: OPA binary not found!')
            logger.error('\t\tRESULT: FAILED')
            results.append({'eval': 'data.rule.rulepass', 'result': "passed" if result else "failed", 'message': ''})
            return results
        if len(self.testcase['snapshotId']) == 1:
            inputjson = snapshot_doc
            if inputjson is None:
                logger.info('\t\tERROR: Missing snapshot')
        else:
            ms_id = dict(zip(self.testcase['snapshotId'], self.testcase['masterSnapshotId']))
            for sid in self.testcase['snapshotId']:
                inputjson.update({ms_id[sid]: self.get_snaphotid_doc(sid)})
        results = []
        if inputjson:
            save_json_to_file(inputjson, '/tmp/input_%s.json' % tid)
            rego_rule = self.rule
            rego_match = re.match(r'^file\((.*)\)$', rego_rule, re.I)
            if rego_match:
                rego_file = self.rego_rule_filename(rego_match.groups()[0], self.container)
            else:
                rego_txt = [
                    "package rule",
                    "default rulepass = false",
                    "rulepass = true{",
                    "   %s" % rego_rule,
                    "}", ""
                ]
                rego_file = '/tmp/input_%s.rego' % tid
                with open(rego_file, 'w') as f:
                    f.write('\n'.join(rego_txt))
            if rego_file:
                # Query the whole rule document for a list of evals, else the single expression.
                query = "data.rule" if isinstance(rule_expr, list) else rule_expr
                result = os.system('%s eval -i /tmp/input_%s.json -d %s "%s" > /tmp/a_%s.json' % (opa_exe, tid, rego_file, query, tid))
                if result != 0:
                    self.log_compliance_info(testId)
                    logger.error("\t\tERROR: problem running the opa binary")
                    self.log_rego_error(json_from_file("/tmp/a_%s.json" % tid, object_pairs_hook=None))

                resultval = json_from_file('/tmp/a_%s.json' % tid)
                if resultval and "errors" in resultval and resultval["errors"]:
                    results.append({'eval': rule_expr, 'result': "failed", 'message': ''})
                    self.log_compliance_info(testId)
                    logger.critical('\t\tTITLE: %s', self.testcase.get('title', ""))
                    logger.critical('\t\tDESCRIPTION: %s', self.testcase.get('description', ""))
                    logger.critical('\t\tRULE: %s', self.testcase.get('rule', ""))
                    logger.critical('\t\tERROR: %s', str(resultval["errors"]))
                    logger.critical('\t\tREMEDIATION: %s', self.testcase.get('remediation_description', ""))
                    logger.error('\t\tRESULT: FAILED')
                    # logger.error(str(resultval["errors"]))
                elif resultval:
                    if isinstance(rule_expr, list):
                        resultdict = resultval['result'][0]['expressions'][0]['value']
                        for val in rule_expr:
                            if 'eval' in val:
                                evalfield = val['eval'].rsplit('.', 1)[-1]
                                evalmessage = val['message'].rsplit('.', 1)[-1] if "message" in val else ""
                                if evalfield in resultdict:
                                    if isinstance(resultdict[evalfield], bool):
                                        result = parsebool(resultdict[evalfield])
                                    else:
                                        if logger.level == logging.DEBUG:
                                            self.log_compliance_info(testId)
                                            logger.warning('\t\tRESULT: SKIPPED')
                                        continue
                                elif evalmessage in resultdict:
                                    result = False
                                else:
                                    if logger.level == logging.DEBUG:
                                        self.log_compliance_info(testId)
                                        logger.warning('\t\tRESULT: SKIPPED')
                                    continue
                                msg = resultdict[evalmessage] if not result and evalmessage in resultdict else ""
                                results.append({
                                    'eval': val["eval"],
                                    'result': "passed" if result else "failed",
                                    'message': msg,
                                    'id': val.get("id"),
                                    'remediation_description': val.get("remediationDescription"),
                                    'remediation_function': val.get("remediationFunction"),
                                })
                                # logger.info('\t\tERROR: %s', resultval)
                                self.log_compliance_info(testId)
                                self.log_result(results[-1])
                    else:
                        resultbool = resultval['result'][0]['expressions'][0]['value']
                        result = parsebool(resultbool)
                        results.append({'eval': rule_expr, 'result': "passed" if result else "failed", 'message': ''})
                        # logger.info('\t\tERROR: %s', resultval)
                        self.log_compliance_info(testId)
                        self.log_result(results[-1])
                        if results[-1]['result'] == 'failed':
                            logger.error('\t\tERROR: %s', json.dumps(dict(resultval)))

            else:
                if logger.level == logging.DEBUG:
                    self.log_compliance_info(testId)
                    logger.info('\t\tERROR: %s missing', rego_match.groups()[0])
                    logger.warning('\t\tRESULT: SKIPPED')
                # results.append({'eval': rule_expr, 'result': "passed" if result else "failed", 'message': ''})
                # self.log_result(results[-1])
            remove_file('/tmp/input_%s.json' % tid)
            remove_file('/tmp/a_%s.json' % tid)
        else:
            results.append({'eval': rule_expr, 'result': "passed" if result else "failed", 'message': ''})
            self.log_result(results[-1])
        return results
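
The list branch assumes 'evals' entries shaped like the sketch below. A self-contained demo of the verdict lookup, mirroring the rsplit('.', 1) handling above (hypothetical testcase data):

# Hypothetical 'evals' entry and opa "data.rule" result document.
val = {'eval': 'data.rule.no_public_access',
       'message': 'data.rule.no_public_access_err',
       'id': 'TEST_1'}
resultdict = {'rulepass': True, 'no_public_access': False,
              'no_public_access_err': 'public access is enabled'}

evalfield = val['eval'].rsplit('.', 1)[-1]        # 'no_public_access'
evalmessage = val['message'].rsplit('.', 1)[-1]   # 'no_public_access_err'
result = resultdict[evalfield]                    # False -> testcase fails
msg = resultdict[evalmessage] if not result else ""
assert msg == 'public access is enabled'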