Code Example #1
import os

# is_file_path_in_pack, print_warning, RELEASE_NOTE_GENERATOR, CONTENT_FILE_SUFFIXES
# and CONTENT_LIB_PATH are provided by the surrounding module.


def create_file_release_notes(change_type, full_file_name):
    """
    Create a release note entry for a changed file.

    :param change_type: git change status (A, M, R*)
    :param full_file_name: path to file in repository
    :return: None
    """
    if isinstance(full_file_name, tuple):
        _, full_file_name = full_file_name

    is_pack = is_file_path_in_pack(full_file_name)
    if is_pack:
        file_type = full_file_name.split("/")[2]
    else:
        file_type = full_file_name.split("/")[0]
    base_name = os.path.basename(full_file_name)
    file_suffix = os.path.splitext(base_name)[-1]
    file_type_mapping = RELEASE_NOTE_GENERATOR.get(file_type)

    if file_type_mapping is None or file_suffix not in CONTENT_FILE_SUFFIXES:
        print_warning("Unsupported file type: {}".format(full_file_name))
        return

    if change_type != "R100":  # only file name has changed (no actual data was modified
        if 'R' in change_type:
            # handle the same as modified
            change_type = 'M'

        file_type_mapping.add(change_type, CONTENT_LIB_PATH + full_file_name)
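
As a rough illustration of how this function could be driven, the sketch below walks the output of git diff --name-status and feeds each (status, path) pair into create_file_release_notes. The driver function and the base branch name are assumptions for illustration only, not part of the original module.

import subprocess


def generate_release_notes_for_diff(base_branch="master"):
    # Hypothetical driver: each line of --name-status output is "<status>\t<path>"
    # (renames appear as "R<score>\t<old path>\t<new path>").
    diff_output = subprocess.run(
        ["git", "diff", "--name-status", base_branch],
        capture_output=True, text=True, check=True,
    ).stdout
    for line in diff_output.splitlines():
        if not line.strip():
            continue
        parts = line.split("\t")
        change_type = parts[0]       # e.g. "A", "M", "R100"
        full_file_name = parts[-1]   # for renames, take the new path
        create_file_release_notes(change_type, full_file_name)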
Code Example #2
def get_packs(modified_files, added_files):
    """Collect the names of all packs that the modified and added files belong to."""
    packs = set()
    changed_files = modified_files.union(added_files)
    for changed_file in changed_files:
        if isinstance(changed_file, tuple):
            # Renamed files are given as (old_path, new_path) tuples; use the new path
            changed_file = changed_file[1]
        pack = get_pack_name(changed_file)
        if pack and is_file_path_in_pack(changed_file):
            packs.add(pack)

    return packs
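
A brief usage sketch, assuming the content repository layout where pack files live under Packs/<PackName>/ (which is what get_pack_name and is_file_path_in_pack inspect); the example paths are made up.

modified_files = {
    "Packs/Base/Scripts/CommonServerPython/CommonServerPython.py",
    # Renamed files arrive as (old_path, new_path) tuples; only the new path is used.
    ("Packs/OldName/pack_metadata.json", "Packs/NewName/pack_metadata.json"),
}
added_files = {"Packs/HelloWorld/Integrations/HelloWorld/HelloWorld.yml"}

print(get_packs(modified_files, added_files))  # expected: {"Base", "NewName", "HelloWorld"}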
Code Example #3
import os

# Helper functions and constants (is_file_path_in_pack, get_pack_name, get_white_listed_items,
# ENTROPY_THRESHOLD, etc.) are provided by the surrounding module.


def search_potential_secrets(secrets_file_paths: list):
    """Return potential secrets (sensitive data) found in committed and added files.

    :param secrets_file_paths: paths of files that are being committed to the git repo
    :return: dictionary mapping each file name to the list of secret strings found in it
    """
    secrets_found = {}
    for file_path in secrets_file_paths:
        # Determine whether the file path is in a pack, and get the pack name
        is_pack = is_file_path_in_pack(file_path)
        pack_name = get_pack_name(file_path)
        # Get the generic/IOC/files whitelist sets, depending on whether the file is in a pack
        secrets_white_list, ioc_white_list, files_white_list = get_white_listed_items(
            is_pack, pack_name)
        # Skip white listed files
        if file_path in files_white_list:
            print(
                "Skipping secrets detection for file: {} as it is white listed"
                .format(file_path))
            continue
        # Init vars for current loop
        file_name = os.path.basename(file_path)
        high_entropy_strings = []
        secrets_found_with_regex = []
        _, file_extension = os.path.splitext(file_path)
        skip_secrets = {'skip_once': False, 'skip_multi': False}
        # get file contents
        file_contents = get_file_contents(file_path, file_extension)
        # In packs, whitelist items are also applied as regex patterns and stripped from the file
        # contents; the pack's whitelist is then reset so the same items are not checked again later
        if is_pack:
            file_contents = remove_white_list_regex(file_contents,
                                                    secrets_white_list)
            secrets_white_list = set()
        yml_file_contents = get_related_yml_contents(file_path)
        # Temporarily add all context output path keywords to the whitelist
        if file_extension == YML_FILE_EXTENSION or yml_file_contents:
            temp_white_list = create_temp_white_list(
                yml_file_contents if yml_file_contents else file_contents)
            secrets_white_list = secrets_white_list.union(temp_white_list)
        # Scan line by line for high-entropy strings and IoC regex matches as possibly suspicious content
        for line in file_contents.split('\n'):
            # If a disable-secrets comment was detected, skip the relevant line(s)
            skip_secrets = is_secrets_disabled(line, skip_secrets)
            if skip_secrets['skip_once'] or skip_secrets['skip_multi']:
                skip_secrets['skip_once'] = False
                continue
            # REGEX scanning for IOCs and false positive groups
            regex_secrets, false_positives = regex_for_secrets(line)
            for regex_secret in regex_secrets:
                if not any(ioc.lower() in regex_secret.lower()
                           for ioc in ioc_white_list):
                    secrets_found_with_regex.append(regex_secret)
            # Add the false positives to the whitelist before testing the strings in this line
            secrets_white_list = secrets_white_list.union(false_positives)
            # Due to the nature of eml files, skip string-by-string secret detection and rely on regex only
            if file_extension in SKIP_FILE_TYPE_ENTROPY_CHECKS or \
                    any(demisto_type in file_name for demisto_type in SKIP_DEMISTO_TYPE_ENTROPY_CHECKS):
                continue
            line = remove_false_positives(line)
            # Calculate entropy for each whitespace-separated string in the line
            for string_ in line.split():
                # Compare the lowercased string against both the generic and the temporary whitelists
                if not any(white_list_string.lower() in string_.lower()
                           for white_list_string in secrets_white_list):
                    entropy = calculate_shannon_entropy(string_)
                    if entropy >= ENTROPY_THRESHOLD:
                        high_entropy_strings.append(string_)

        if high_entropy_strings or secrets_found_with_regex:
            # Deduplicate identical matches across the two lists
            file_secrets = list(
                set(high_entropy_strings + secrets_found_with_regex))
            secrets_found[file_name] = file_secrets

    return secrets_found
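
calculate_shannon_entropy and ENTROPY_THRESHOLD are not shown in this example. For reference, a minimal sketch of a character-frequency Shannon entropy calculation is given below; the actual helper in the module may be implemented differently, and the function name here simply mirrors the call above.

import math
from collections import Counter


def calculate_shannon_entropy(data: str) -> float:
    # Shannon entropy over the character distribution of the string:
    # H = -sum(p(c) * log2(p(c))) over every distinct character c.
    # A uniformly random string approaches log2(number of distinct characters).
    if not data:
        return 0.0
    length = len(data)
    counts = Counter(data)
    return -sum((n / length) * math.log2(n / length) for n in counts.values())

Under a definition like this, random-looking tokens such as API keys tend to score noticeably higher than ordinary words, which is what the ENTROPY_THRESHOLD comparison in the loop above relies on.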