def get_modified_files_for_testing(git_diff: str) -> ModifiedFiles:
    """
    Gets git diff string and filters those files into tests:

    Args:
        git_diff: a git diff output (with --name-only flag)
    Returns:
        ModifiedFiles instance
    """
    # Mapping of the files FileType: file path
    types_to_files: Dict[FileType, Set[str]] = create_type_to_file(git_diff)

    # Checks if any common file exists in types_to_file
    changed_common_files = get_common_files(types_to_files.get(FileType.SCRIPT, set()))
    types_to_files = remove_common_files(types_to_files, changed_common_files)
    # Sample tests are the remaining python files
    sample_tests = types_to_files.get(FileType.PYTHON_FILE, set())

    # Modified files = YMLs of integrations, scripts and playbooks
    modified_files: Set[str] = types_to_files.get(FileType.INTEGRATION, set()).union(
        types_to_files.get(FileType.SCRIPT, set()),
        types_to_files.get(FileType.BETA_INTEGRATION, set()),
        types_to_files.get(FileType.PLAYBOOK, set()))  # Modified YMLs for testing (Integrations, Scripts, Playbooks).

    # Metadata packs - skip files whose pack name cannot be resolved
    modified_metadata: Set[str] = set()
    for file_path in types_to_files.get(FileType.METADATA, set()):
        if pack_name := tools.get_pack_name(file_path):
            modified_metadata.add(pack_name)

    # Modified tests are test playbooks
    modified_tests: Set[str] = types_to_files.get(FileType.TEST_PLAYBOOK, set())

    # Booleans: the file kind changed iff its type appears in the mapping
    is_conf_json = FileType.CONF_JSON in types_to_files
    is_landing_page_sections_json = FileType.LANDING_PAGE_SECTIONS_JSON in types_to_files
    is_reputations_json = FileType.REPUTATION in types_to_files
    is_indicator_json = FileType.INDICATOR_FIELD in types_to_files

    # NOTE(review): the original block ended here without a return statement,
    # returning None despite the ModifiedFiles annotation; the tail below is
    # reconstructed from the complete duplicate of this function in this file.
    return ModifiedFiles(list(modified_files),
                         list(modified_tests),
                         list(changed_common_files),
                         is_conf_json,
                         list(sample_tests),
                         modified_metadata,
                         is_reputations_json,
                         is_indicator_json,
                         is_landing_page_sections_json)
Example #2
0
    def get_packs_with_added_release_notes(added_files):
        """Return the set of pack names that ship a newly added release-notes file."""
        return {
            get_pack_name(path)
            for path in added_files
            if find_type(path=path) == FileType.RELEASE_NOTES
        }
Example #3
0
    def is_valid_file(self, validate_rn=True):
        # type: (bool) -> bool
        """Check whether the script is valid or not.

        Args:
            validate_rn: whether the parent validation also checks release notes.

        Returns:
            bool: True only if every executed check passed.
        """
        checks = [
            super().is_valid_file(validate_rn),
            self.validate_readme_exists(self.validate_all),
            self.is_valid_subtype(),
            self.is_id_equals_name(),
            self.is_docker_image_valid(),
            self.is_valid_pwsh(),
            self.is_valid_script_file_path(),
            self.is_there_separators_in_names(),
            self.name_not_contain_the_type(),
            self.runas_is_not_dbtrole(),
        ]
        # name validation runs only on added files (no previous version exists)
        if not self.old_file:
            checks.append(self.is_valid_name())
        # scripts in core packs carry an extra restriction
        if get_pack_name(self.file_path) in get_core_pack_list():
            checks.append(self.no_incident_in_core_pack())
        return all(checks)
Example #4
0
    def run_validation_on_specific_files(self):
        """Run validations only on specific files
        """
        results = set()

        for raw_path in self.file_path.split(','):
            ignore_list = self.get_error_ignore_list(get_pack_name(raw_path))

            # a plain file gets validated directly
            if os.path.isfile(raw_path):
                click.secho('\n================= Validating file =================', fg="bright_cyan")
                results.add(self.run_validations_on_file(raw_path, ignore_list))
                continue

            path = raw_path.rstrip('/')
            dir_name = os.path.basename(path)
            if dir_name in CONTENT_ENTITIES_DIRS:
                click.secho(f'\n================= Validating content directory {path} =================',
                            fg="bright_cyan")
                results.add(self.run_validation_on_content_entities(path, ignore_list))
            elif os.path.basename(os.path.dirname(path)) == PACKS_DIR:
                click.secho(f'\n================= Validating pack {path} =================',
                            fg="bright_cyan")
                results.add(self.run_validations_on_pack(path))
            else:
                click.secho(f'\n================= Validating package {path} =================',
                            fg="bright_cyan")
                results.add(self.run_validation_on_package(path, ignore_list))

        return all(results)
Example #5
0
    def __init__(self, pack_path: str, update_type: Union[str, None], modified_files_in_pack: set, added_files: set,
                 specific_version: Union[str, None] = None, pre_release: bool = False, pack: Union[str, None] = None,
                 pack_metadata_only: bool = False, text: str = '', existing_rn_version_path: str = ''):
        """Initialize a release-notes update for a single pack.

        Args:
            pack_path: path of the pack whose release notes are updated.
            update_type: requested update type (stored as-is).
            modified_files_in_pack: modified file paths; renamed files arrive
                as (old path, new path) tuples.
            added_files: newly added file paths.
            specific_version: exact version to use, when given.
            pre_release: whether this is a pre-release update.
            pack: pack name; derived from pack_path when not given.
            pack_metadata_only: whether only the pack metadata changed.
            text: release-notes text to use.
            existing_rn_version_path: path of an existing release-notes file.
        """
        self.pack = pack if pack else get_pack_name(pack_path)
        self.update_type = update_type
        self.pack_meta_file = PACKS_PACK_META_FILE_NAME
        try:
            self.pack_path = pack_name_to_path(self.pack)
        except TypeError:
            click.secho(f'Please verify the pack path is correct: {self.pack}.', fg='red')
            sys.exit(1)
        # renamed files will appear in the modified list as a tuple: (old path, new path)
        modified_files_in_pack = {file_[1] if isinstance(file_, tuple) else file_ for file_ in modified_files_in_pack}
        self.modified_files_in_pack = {
            self.check_for_release_notes_valid_file_path(file_path)
            for file_path in modified_files_in_pack
        }

        self.added_files = added_files
        self.pre_release = pre_release
        self.specific_version = specific_version
        self.existing_rn_changed = False
        self.text = text
        self.existing_rn_version_path = existing_rn_version_path
        self.should_delete_existing_rn = False
        self.pack_metadata_only = pack_metadata_only

        self.metadata_path = os.path.join(self.pack_path, 'pack_metadata.json')
        self.master_version = self.get_master_version()
Example #6
0
    def __init__(self, input: str, output: str = '', force: bool = False, marketplace: Optional[str] = None):
        """
        Init a GenericModuleUnifier
        Args:
            input: a path of the GenericModule file to unify.
            output: The output dir to write the unified GenericModule json to.
            force: if True - Forcefully overwrites the preexisting unified GenericModule file if one exists.
            marketplace: optional marketplace name, propagated to the tag parser.
        """

        self.input_path = input
        self.pack_name = get_pack_name(file_path=self.input_path)
        self.pack_path = os.path.join(PACKS_DIR, self.pack_name)

        # BUG FIX: str.rstrip('.json') strips any trailing '.', 'j', 's', 'o', 'n'
        # characters (e.g. 'mason.json' -> 'ma'), so remove the suffix explicitly.
        base_name = os.path.basename(self.input_path)
        if base_name.endswith('.json'):
            base_name = base_name[:-len('.json')]
        self.input_file_name = base_name

        self.use_force = force
        self.marketplace = marketplace
        if marketplace:
            MARKETPLACE_TAG_PARSER.marketplace = marketplace

        if output:
            if not os.path.isdir(output):
                click.secho('You have failed to provide a legal dir path', fg='bright_red')
                sys.exit(1)

            self.dest_dir = output

        else:
            # an output wasn't given, save the unified file in the input's file dir
            self.dest_dir = os.path.dirname(self.input_path)

        self.dest_path = os.path.join(self.dest_dir, f'{self.input_file_name}_unified.json')
Example #7
0
def get_classifier_data(path):
    """Parse a classifier JSON file into an {id: metadata} record.

    Args:
        path: path to the classifier JSON file.

    Returns:
        dict: single-entry mapping from the classifier id to its metadata,
        including every incident type referenced by the classifier.
    """
    data = OrderedDict()
    json_data = get_json(path)

    id_ = json_data.get('id')
    name = json_data.get('name', '')
    fromversion = json_data.get('fromVersion')
    toversion = json_data.get('toVersion')
    pack = get_pack_name(path)

    incidents_types = set()
    # truthiness already rules out the empty string, no explicit '' check needed
    default_incident_type = json_data.get('defaultIncidentType')
    if default_incident_type:
        incidents_types.add(default_incident_type)
    # every keyTypeMap value is collected as an incident type
    incidents_types.update(json_data.get('keyTypeMap', {}).values())

    if name:
        data['name'] = name
    data['file_path'] = path
    if toversion:
        data['toversion'] = toversion
    if fromversion:
        data['fromversion'] = fromversion
    if pack:
        data['pack'] = pack
    if incidents_types:
        data['incident_types'] = list(incidents_types)

    return {id_: data}
Example #8
0
def update_pack_releasenotes(**kwargs):
    """Update release notes for the packs changed in git.

    Reads 'pack', 'update_type', 'pre_release' and 'all' from kwargs, detects
    modified/added files via FilesValidator, and runs UpdateRN per pack.
    Exits early (code 0) on ambiguous or empty change sets.
    """
    _pack = kwargs.get('pack')
    update_type = kwargs.get('update_type')
    pre_release = kwargs.get('pre_release')
    is_all = kwargs.get('all')
    modified, added, old, _packs = FilesValidator(
        use_git=True).get_modified_and_added_files()
    # packs that already have a release-notes file among the added files
    packs_existing_rn = {get_pack_name(pf) for pf in added if 'ReleaseNotes' in pf}
    if packs_existing_rn:
        print_warning(
            f"Found existing release notes for the following packs: {', '.join(packs_existing_rn)}"
        )
    # several packs changed but neither --all nor -p was given: ambiguous
    if len(_packs) > 1 and not is_all and not _pack:
        print_error(
            f"Detected changes in the following packs: {', '.join(_packs)}\n"
            f"To update release notes in a specific pack, please use the -p parameter "
            f"along with the pack name.")
        sys.exit(0)
    if len(modified) < 1:
        print_warning('No changes were detected.')
        sys.exit(0)
    if is_all and not _pack:
        # update every changed pack that does not already have release notes
        packs = list(_packs - packs_existing_rn)
        print_warning(
            f"Adding release notes to the following packs: {', '.join(packs)}"
        )
        for pack in packs:
            update_pack_rn = UpdateRN(pack=pack,
                                      update_type=update_type,
                                      pack_files=modified,
                                      pre_release=pre_release)
            update_pack_rn.execute_update()
    elif is_all and _pack:
        print_error(
            "Please remove the --all flag when specifying only one pack.")
        sys.exit(0)
    elif _pack:
        if _pack in packs_existing_rn:
            print_error(
                f"New release notes file already found for {_pack}. "
                f"Please update manually or delete {pack_name_to_path(_pack)}"
            )
        else:
            update_pack_rn = UpdateRN(pack=_pack,
                                      update_type=update_type,
                                      pack_files=modified,
                                      pre_release=pre_release)
            update_pack_rn.execute_update()
Example #9
0
def get_incident_type_data(path):
    """Parse an incident-type JSON file into an {id: metadata} record.

    Args:
        path: path to the incident type JSON file.

    Returns:
        dict: single-entry mapping from the incident type id to its metadata.
    """
    data = OrderedDict()
    json_data = get_json(path)

    id_ = json_data.get('id')
    name = json_data.get('name', '')
    fromversion = json_data.get('fromVersion')
    toversion = json_data.get('toVersion')
    playbook_id = json_data.get('playbookId')
    pre_processing_script = json_data.get('preProcessingScript')
    pack = get_pack_name(path)

    if name:
        data['name'] = name
    data['file_path'] = path
    if toversion:
        data['toversion'] = toversion
    if fromversion:
        data['fromversion'] = fromversion
    if pack:
        data['pack'] = pack
    # truthiness already rules out the empty string; the explicit '' checks were redundant
    if playbook_id:
        data['playbooks'] = playbook_id
    if pre_processing_script:
        data['scripts'] = pre_processing_script

    return {id_: data}
Example #10
0
def get_layout_data(path):
    """Parse a layout JSON file into an {id: metadata} record.

    Args:
        path: path to the layout JSON file.

    Returns:
        dict: single-entry mapping from the layout id to its metadata.
    """
    data = OrderedDict()
    json_data = get_json(path)
    # defensive default: a file without a 'layout' object would otherwise
    # raise AttributeError on the .get() calls below
    layout = json_data.get('layout') or {}
    name = layout.get('name', '-')
    id_ = json_data.get('id', layout.get('id', '-'))
    type_ = json_data.get('typeId')
    type_name = json_data.get('TypeName')
    fromversion = json_data.get('fromVersion')
    toversion = json_data.get('toVersion')
    kind = json_data.get('kind')
    pack = get_pack_name(path)

    if type_:
        data['typeID'] = type_
    if type_name:
        data['typename'] = type_name
    data['name'] = name
    if toversion:
        data['toversion'] = toversion
    if fromversion:
        data['fromversion'] = fromversion
    if pack:
        data['pack'] = pack
    if kind:
        data['kind'] = kind
    data['path'] = path

    return {id_: data}
    def is_valid_file(self, validate_rn=True, is_new_file=False, use_git=False, is_added_file=False):
        """Check whether the Incident Field is valid or not
        """
        checks = [
            super().is_valid_file(validate_rn),
            self.is_valid_type(),
            self.is_valid_group(),
            self.is_valid_content_flag(),
            self.is_valid_system_flag(),
            self.is_valid_cli_name(),
            self.is_valid_version(),
            self.is_valid_required(),
            self.does_not_have_empty_select_values(),
            self.is_aliased_fields_are_valid(),
        ]

        # fields in core packs get an extra name check
        if get_pack_name(self.file_path) in get_core_pack_list():
            checks.append(self.is_valid_name())
        if is_new_file and use_git:
            checks.append(self.is_valid_field_name_prefix())
        if is_added_file:
            checks.append(self.is_valid_unsearchable_key())
        return all(checks)
    def are_release_notes_complete(self):
        """Verify every changed file of this pack is mentioned in its release notes.

        Iterates the added and modified files, skipping file types listed in
        SKIP_RELEASE_NOTES_FOR_TYPES and files that belong to other packs. For
        each remaining file, both the type's RN header and the file's name must
        appear in self.latest_release_notes; otherwise a missing-entry error is
        reported via handle_error.

        Returns:
            bool: True when no missing release-notes entry was reported.
        """
        is_valid = True
        modified_added_files = itertools.chain.from_iterable(
            (self.added_files or [], self.modified_files or []))
        if modified_added_files:
            for file in modified_added_files:
                # renamed files will appear in the modified list as a tuple: (old path, new path)
                if isinstance(file, tuple):
                    file = file[1]
                checked_file_pack_name = get_pack_name(file)

                # these file types never require a release-notes entry
                if find_type(file) in SKIP_RELEASE_NOTES_FOR_TYPES:
                    continue
                elif checked_file_pack_name and checked_file_pack_name == self.pack_name:
                    # Refer image and description file paths to the corresponding yml files
                    file = UpdateRN.change_image_or_desc_file_path(file)
                    # throwaway UpdateRN instance, used only for its name/type helpers
                    update_rn_util = UpdateRN(pack_path=self.pack_path,
                                              modified_files_in_pack=set(),
                                              update_type=None,
                                              added_files=set(),
                                              pack=self.pack_name)
                    file_name, file_type = update_rn_util.get_changed_file_name_and_type(
                        file)
                    if file_name and file_type:
                        # both the type's section header and the entity name must be present
                        if (RN_HEADER_BY_FILE_TYPE[file_type] not in self.latest_release_notes) or \
                                (file_name not in self.latest_release_notes):
                            entity_name = update_rn_util.get_display_name(file)
                            error_message, error_code = Errors.missing_release_notes_entry(
                                file_type, self.pack_name, entity_name)
                            if self.handle_error(error_message, error_code,
                                                 self.release_notes_file_path):
                                is_valid = False
        return is_valid
    def create_output_dirs(self):
        """Resolve and create output directories when none were supplied."""
        if self.dashboard_dir:
            # an output location already exists - nothing to do
            return

        if not is_external_repository() and self.autocreate_dir:
            # inside the content repository: use the pack's standard directories
            pack_name = get_pack_name(self.input)
            if self.logging:
                click.echo(
                    f"No output path given creating Dashboards and GenericModules "
                    f"directories in pack {pack_name}")
            pack_path = os.path.join(PACKS_DIR, pack_name)
            self.dashboard_dir = os.path.join(pack_path, DASHBOARDS_DIR)
            self.module_dir = os.path.join(pack_path, GENERIC_MODULES_DIR)

            # create the dirs, dont fail if exist
            self.create_output_dir(self.dashboard_dir)
            self.create_output_dir(self.module_dir)
        else:
            # not in content: fall back to the current working directory
            if self.logging:
                click.echo(
                    "No output path given and not running in content repo, creating "
                    "files in the current working directory")
            self.dashboard_dir = '.'
            self.module_dir = '.'
Example #14
0
    def validate_release_notes(self, file_path, added_files, modified_files, pack_error_ignore_list, is_modified):
        """Validate a single release-notes file; returns False on a handled error."""
        pack_name = get_pack_name(file_path)

        # an existing release-notes file must not be modified
        if is_modified:
            message, code = Errors.modified_existing_release_notes(pack_name)
            if self.handle_error(error_message=message, error_code=code, file_path=file_path):
                return False

        # a brand-new pack should not ship a release-notes file
        if pack_name in self.new_packs:
            message, code = Errors.added_release_notes_for_new_pack(pack_name)
            if self.handle_error(error_message=message, error_code=code, file_path=file_path):
                return False

        if pack_name == 'NonSupported':
            return True

        release_notes_validator = ReleaseNotesValidator(file_path, pack_name=pack_name,
                                                        modified_files=modified_files,
                                                        added_files=added_files or {file_path},
                                                        ignored_errors=pack_error_ignore_list,
                                                        print_as_warnings=self.print_ignored_errors)
        return release_notes_validator.is_file_valid()
Example #15
0
 def __init__(self, file_path: str, ignored_errors=None, print_as_warnings=False,
              suppress_print=False, json_file_path=None, maximum_image_size: Optional[int] = None, specific_validations=None):
     """Initialize the validator, resolving the pack path and the image-size limit."""
     super().__init__(file_path=file_path, ignored_errors=ignored_errors,
                      print_as_warnings=print_as_warnings, suppress_print=suppress_print,
                      json_file_path=json_file_path, specific_validations=specific_validations)
     self.pack_path = os.path.join(PACKS_DIR, get_pack_name(file_path))
     # fall back to the class-level default when no explicit limit was given
     self.maximum_image_size = maximum_image_size or self.IMAGE_MAX_SIZE
Example #16
0
def get_playbook_data(file_path):
    """Build an {id: metadata} record for a playbook YML file."""
    data_dictionary = get_yaml(file_path)
    id_ = data_dictionary.get('id', '-')

    playbook_data = OrderedDict()
    playbook_data['name'] = data_dictionary.get('name', '-')
    playbook_data['file_path'] = file_path

    # optional keys are inserted only when truthy, in this fixed order
    optional_fields = (
        ('toversion', data_dictionary.get('toversion')),
        ('fromversion', data_dictionary.get('fromversion')),
        ('implementing_scripts', get_task_ids_from_playbook('scriptName', data_dictionary)),
        ('implementing_playbooks', get_task_ids_from_playbook('playbookName', data_dictionary)),
        ('command_to_integration', get_commmands_from_playbook(data_dictionary)),
        ('tests', data_dictionary.get('tests')),
        ('deprecated', data_dictionary.get('deprecated', False)),
        ('pack', get_pack_name(file_path)),
    )
    for key, value in optional_fields:
        if value:
            playbook_data[key] = value

    return {id_: playbook_data}
Example #17
0
def get_layoutscontainer_data(path):
    """Build an {id: metadata} record for a layouts-container JSON file."""
    json_data = get_json(path)
    container_fields = ["group", "edit", "indicatorsDetails", "indicatorsQuickView", "quickView", "close",
                        "details", "detailsV2", "mobile", "name"]
    # keep only the fields that carry a truthy value
    data = OrderedDict((field, json_data[field]) for field in container_fields if json_data.get(field))

    id_ = json_data.get('id')
    pack = get_pack_name(path)
    types_dependency = {id_}
    fields_dependency = get_values_for_keys_recursively(json_data, ['fieldId'])

    name = data.get('name')
    if name:
        types_dependency.add(name)
    if json_data.get('toVersion'):
        data['toversion'] = json_data['toVersion']
    if json_data.get('fromVersion'):
        data['fromversion'] = json_data['fromVersion']
    if pack:
        data['pack'] = pack
    data['file_path'] = path
    data['incident_and_indicator_types'] = list(types_dependency)
    if fields_dependency['fieldId']:
        data['incident_and_indicator_fields'] = fields_dependency['fieldId']

    return {id_: data}
Example #18
0
def get_integration_data(file_path):
    """Parse an integration YML file into an {id: metadata} record.

    Args:
        file_path: path to the integration YML file.

    Returns:
        dict: single-entry mapping from the integration id to its metadata.
    """
    integration_data = OrderedDict()
    data_dictionary = get_yaml(file_path)

    # '-' in script.script marks a non-unified (split) integration
    is_unified_integration = data_dictionary.get('script', {}).get('script', '') != '-'

    id_ = data_dictionary.get('commonfields', {}).get('id', '-')
    name = data_dictionary.get('name', '-')

    deprecated = data_dictionary.get('deprecated', False)
    tests = data_dictionary.get('tests')
    toversion = data_dictionary.get('toversion')
    fromversion = data_dictionary.get('fromversion')
    commands = data_dictionary.get('script', {}).get('commands', [])
    cmd_list = [command.get('name') for command in commands]
    pack = get_pack_name(file_path)
    integration_api_modules = get_integration_api_modules(file_path, data_dictionary, is_unified_integration)
    default_classifier = data_dictionary.get('defaultclassifier')
    default_incident_type = data_dictionary.get('defaultIncidentType')
    is_feed = data_dictionary.get('feed')

    # comprehensions instead of the manual append/add loops
    deprecated_commands = [command.get('name') for command in commands if command.get('deprecated', False)]
    mappers = {data_dictionary[mapper] for mapper in ('defaultmapperin', 'defaultmapperout')
               if data_dictionary.get(mapper)}

    integration_data['name'] = name
    integration_data['file_path'] = file_path
    if toversion:
        integration_data['toversion'] = toversion
    if fromversion:
        integration_data['fromversion'] = fromversion
    if cmd_list:
        integration_data['commands'] = cmd_list
    if tests:
        integration_data['tests'] = tests
    if deprecated:
        integration_data['deprecated'] = deprecated
    if deprecated_commands:
        integration_data['deprecated_commands'] = deprecated_commands
    if pack:
        integration_data['pack'] = pack
    if integration_api_modules:
        integration_data['api_modules'] = integration_api_modules
    # truthiness already rules out the empty string; the '' checks were redundant
    if default_classifier:
        integration_data['classifiers'] = default_classifier
    if mappers:
        integration_data['mappers'] = list(mappers)
    if default_incident_type:
        integration_data['incident_types'] = default_incident_type
    if is_feed:
        # feed integrations are mapped to the CommonTypes indicator fields/types
        integration_data['indicator_fields'] = "CommonTypes"
        integration_data['indicator_types'] = "CommonTypes"

    return {id_: integration_data}
Example #19
0
def get_modified_files_for_testing(
    git_diff: str,
) -> Tuple[List[str], List[str], List[str], bool, List[str], set, bool, bool]:
    """
    Filter the files of a git diff into the buckets used for testing.

    Args:
        git_diff: a git diff output (with --name-only flag)
    Returns:
        modified_files: Modified YMLs for testing (Integrations, Scripts, Playbooks).
        modified_tests: Test playbooks.
        changed_common_files: Globally used YMLs (Like CommonServerPython).
        is_conf_json: If Tests/Conf.json has been changed.
        sample_tests: Files to test, Like the infrastructures files.
        modified_metadata: Pack names of changed metadata files.
        is_reputations_json: If any reputation file changed.
        is_indicator_json: If any indicator file changed.
    """
    # FileType -> set of changed file paths of that type
    types_to_files: Dict[FileType, Set[str]] = create_type_to_file(git_diff)

    # pull globally shared scripts out of the mapping
    changed_common_files = get_common_files(types_to_files.get(FileType.SCRIPT, set()))
    types_to_files = remove_common_files(types_to_files, changed_common_files)

    # the remaining python files are treated as sample tests
    sample_tests = types_to_files.get(FileType.PYTHON_FILE, set())

    # YMLs of integrations, scripts and playbooks are the files to test
    modified_files: Set[str] = set().union(
        types_to_files.get(FileType.INTEGRATION, set()),
        types_to_files.get(FileType.SCRIPT, set()),
        types_to_files.get(FileType.PLAYBOOK, set()),
    )

    # pack name of every changed pack-metadata file
    modified_metadata: Set[str] = {
        tools.get_pack_name(file_path)
        for file_path in types_to_files.get(FileType.METADATA, set())
    }

    # test playbooks are the modified tests
    modified_tests: Set[str] = types_to_files.get(FileType.TEST_PLAYBOOK, set())

    # presence flags: a file kind changed iff its type appears in the mapping
    is_conf_json = FileType.CONF_JSON in types_to_files
    is_reputations_json = FileType.REPUTATION in types_to_files
    is_indicator_json = FileType.INDICATOR_FIELD in types_to_files

    return (
        list(modified_files),
        list(modified_tests),
        list(changed_common_files),
        is_conf_json,
        list(sample_tests),
        modified_metadata,
        is_reputations_json,
        is_indicator_json,
    )
    def update_checked_flags_by_support_level(self, file_path):
        """Add the file to the ignore list when its pack is partner/community supported."""
        pack_name = get_pack_name(file_path)
        if not pack_name:
            return

        metadata_path = os.path.join(PACKS_DIR, pack_name,
                                     PACKS_PACK_META_FILE_NAME)
        support_level = self.get_metadata_file_content(metadata_path).get(PACK_METADATA_SUPPORT)
        if support_level in ('partner', 'community'):
            self.add_flag_to_ignore_list(file_path, support_level)
Example #21
0
def get_packs(changed_files):
    """Return the pack name of every changed file (renamed files arrive as tuples)."""
    # renamed files appear as (old path, new path); keep the new path
    normalized_paths = (
        changed_file[1] if isinstance(changed_file, tuple) else changed_file
        for changed_file in changed_files
    )
    return {pack for pack in map(get_pack_name, normalized_paths) if pack}
Example #22
0
    def check_existing_rn(self, added_files: set):
        """Record packs that already ship a release-notes file.

        Maps the pack name of every added ReleaseNotes file to its path in
        ``self.packs_existing_rn``.

        :param added_files: A set of new added files
        """
        self.packs_existing_rn.update({
            get_pack_name(file_path): file_path
            for file_path in added_files
            if 'ReleaseNotes' in file_path
        })
def get_modified_files_for_testing(git_diff: str) -> ModifiedFiles:
    """
    Gets git diff string and filters those files into tests:

    Args:
        git_diff: a git diff output (with --name-only flag)
    Returns:
        ModifiedFiles instance
    """
    types_to_files: Dict[FileType, Set[str]] = create_type_to_file(
        git_diff)  # Mapping of the files FileType: file path

    # Checks if any common file exists in types_to_file
    changed_common_files = get_common_files(
        types_to_files.get(FileType.SCRIPT, set()))
    types_to_files = remove_common_files(types_to_files, changed_common_files)
    # Sample tests are the remaining python files
    sample_tests = types_to_files.get(FileType.PYTHON_FILE, set())

    # Modified files = YMLs of integrations, scripts and playbooks
    modified_files: Set[str] = types_to_files.get(
        FileType.INTEGRATION, set()).union(
            types_to_files.get(FileType.SCRIPT, set()),
            types_to_files.get(FileType.BETA_INTEGRATION, set()),
            types_to_files.get(FileType.PLAYBOOK, set())
        )  # Modified YMLs for testing (Integrations, Scripts, Playbooks).

    # Metadata packs - skip files whose pack name cannot be resolved so that
    # a None entry never ends up in the pack-name set
    modified_metadata: Set[str] = set()
    for file_path in types_to_files.get(FileType.METADATA, set()):
        if pack_name := tools.get_pack_name(file_path):
            modified_metadata.add(pack_name)

    modified_tests: Set[str] = types_to_files.get(
        FileType.TEST_PLAYBOOK, set())  # Modified tests are test playbooks

    # Booleans. If this kind of file is inside, its exists
    is_conf_json = FileType.CONF_JSON in types_to_files

    is_landing_page_sections_json = FileType.LANDING_PAGE_SECTIONS_JSON in types_to_files

    is_reputations_json = FileType.REPUTATION in types_to_files

    is_indicator_json = FileType.INDICATOR_FIELD in types_to_files

    modified_files_instance = ModifiedFiles(list(modified_files),
                                            list(modified_tests),
                                            list(changed_common_files),
                                            is_conf_json, list(sample_tests),
                                            modified_metadata,
                                            is_reputations_json,
                                            is_indicator_json,
                                            is_landing_page_sections_json)

    return modified_files_instance
Example #24
0
 def ignore_secrets(self, secrets):
     """Append the given secrets, one per line, to the pack's .secrets-ignore file."""
     pack_dir = get_pack_name(self.full_output_path)
     ignore_path = f'Packs/{pack_dir}/.secrets-ignore'
     try:
         with open(ignore_path, 'a') as ignore_file:
             ignore_file.writelines(f'{secret}\n' for secret in secrets)
     except FileNotFoundError:
         print_warning(
             "Could not find the .secrets-ignore file - make sure your path is correct"
         )
Example #25
0
    def validate_added_files(self, added_files, modified_files):
        """Validate every newly added file; True only when all validations pass."""
        click.secho('\n================= Running validation on newly added files =================',
                    fg="bright_cyan")

        # build the full list first so every file is validated (no short-circuit)
        results = [
            self.run_validations_on_file(file_path, self.get_error_ignore_list(get_pack_name(file_path)),
                                         is_modified=False, modified_files=modified_files,
                                         added_files=added_files)
            for file_path in added_files
        ]
        return all(results)
Example #26
0
    def get_packs(modified_files, added_files):
        """Collect the pack name of every changed file that resides inside a pack."""
        packs = set()
        for entry in modified_files.union(added_files):
            # renamed files arrive as (old path, new path) tuples; keep the new path
            path = entry[1] if isinstance(entry, tuple) else entry
            pack = get_pack_name(path)
            if pack and is_file_path_in_pack(path):
                packs.add(pack)

        return packs
Example #27
0
def get_playbook_data(file_path: str) -> dict:
    """Parse a playbook YAML file into an id-keyed metadata dictionary.

    Args:
        file_path: Path to the playbook YAML file.

    Returns:
        dict: {playbook_id: playbook_data}; playbook_data always carries
        'name' and 'file_path' and includes the remaining fields only
        when they are truthy in the file.
    """
    yml_data = get_yaml(file_path)
    graph = build_tasks_graph(yml_data)

    playbook_id = yml_data.get('id', '-')

    scripts, scripts_skippable = get_task_ids_from_playbook('scriptName',
                                                            yml_data,
                                                            graph
                                                            )
    playbooks, playbooks_skippable = get_task_ids_from_playbook('playbookName',
                                                                yml_data,
                                                                graph
                                                                )
    commands, commands_skippable = get_commands_from_playbook(yml_data)
    dependent_incident_fields, dependent_indicator_fields = get_dependent_incident_and_indicator_fields(yml_data)

    playbook_data = OrderedDict()
    playbook_data['name'] = yml_data.get('name', '-')
    playbook_data['file_path'] = file_path

    # Optional fields are added only when truthy, in this fixed order
    # (preserved from the original insertion order).
    optional_fields = (
        ('toversion', yml_data.get('toversion')),
        ('fromversion', yml_data.get('fromversion')),
        ('implementing_scripts', scripts),
        ('implementing_playbooks', playbooks),
        ('command_to_integration', commands),
        ('tests', yml_data.get('tests')),
        ('deprecated', yml_data.get('deprecated', False)),
        ('pack', get_pack_name(file_path)),
        ('skippable_tasks', scripts_skippable + playbooks_skippable + commands_skippable),
        ('incident_fields', list(dependent_incident_fields)),
        ('indicator_fields', list(dependent_indicator_fields)),
    )
    for key, value in optional_fields:
        if value:
            playbook_data[key] = value

    return {playbook_id: playbook_data}
Example #28
0
    def has_unskipped_test_playbook(self,
                                    current_file,
                                    entity_id,
                                    file_path,
                                    test_playbook_ids=None):
        """Check if the content entity has at least one unskipped test playbook.

        Collects test playbook ids from the file's `tests` field, merges them
        with `test_playbook_ids`, and requires at least one to be unskipped
        (unless the entity has unit tests instead).

        Args:
            current_file: The file to check.
            entity_id: The id of the entity to check.
            file_path: The file path of the entity to check.
            test_playbook_ids: Extra test playbook ids unrelated to the file's
                `tests` field.

        Returns:
            The validator's validity flag (False once a missing-test error is
            recorded and not ignored).
        """
        # Entities tagged as a dynamic section need no test playbook.
        if self.DYNAMIC_SECTION_TAG in current_file.get('tags', []):
            return True

        # The ApiModules pack is exempt from this validation.
        if get_pack_name(file_path) == API_MODULES_PACK:
            return self._is_valid

        candidate_ids = set(test_playbook_ids or [])
        file_tests = current_file.get('tests')
        if isinstance(file_tests, list):
            candidate_ids.update(file_tests)

        skipped = self.conf_data.get('skipped_tests', {})

        def _is_unskipped(tpb_id):
            # A test counts as skipped when listed under conf.json's
            # skipped_tests or explicitly marked 'No test'.
            return not ((skipped and tpb_id in skipped) or 'No test' in tpb_id)

        if (not any(_is_unskipped(tpb_id) for tpb_id in candidate_ids)
                and not self.has_unittest(file_path)):
            error_message, error_code = Errors.all_entity_test_playbooks_are_skipped(
                entity_id)
            if self.handle_error(error_message,
                                 error_code,
                                 file_path=file_path):
                self._is_valid = False
        return self._is_valid
    def update_checked_flags_by_support_level(self, file_path):
        """Add ignore flags to a file based on its pack's support level.

        Partner packs whose certification is set but not 'certified' get the
        'non-certified-partner' flag; community packs get the 'community' flag.

        Args:
            file_path: Path of the file whose pack metadata is inspected.
        """
        pack_name = get_pack_name(file_path)
        if not pack_name:
            return

        metadata_path = os.path.join(PACKS_DIR, pack_name, PACKS_PACK_META_FILE_NAME)
        metadata = self.get_metadata_file_content(metadata_path)
        support = metadata.get(PACK_METADATA_SUPPORT)

        if support == 'partner':
            certification = metadata.get(PACK_METADATA_CERTIFICATION)
            if certification is not None and certification != 'certified':
                self.add_flag_to_ignore_list(file_path, 'non-certified-partner')
        elif support == 'community':
            self.add_flag_to_ignore_list(file_path, 'community')
Example #30
0
def get_incident_field_data(path, incidents_types_list):
    """Parse an incident-field JSON file into an id-keyed metadata dict.

    Args:
        path: Path to the incident field JSON file.
        incidents_types_list: List of single-key {incident_type_id: data}
            dicts, used to expand the special 'all' associated type.

    Returns:
        dict: {field_id: field_data} with only truthy fields included
        (besides 'file_path', which is always present).
    """
    json_data = get_json(path)

    # Merge explicit and system-associated types into one set.
    associated_types = set(json_data.get('associatedTypes') or [])
    associated_types |= set(json_data.get('systemAssociatedTypes') or [])
    if 'all' in associated_types:
        # 'all' expands to every known incident type id.
        associated_types = [
            list(incident_type.keys())[0]
            for incident_type in incidents_types_list
        ]

    scripts = set()
    if json_data.get('script'):
        scripts.add(json_data['script'])
    if json_data.get('fieldCalcScript'):
        scripts.add(json_data['fieldCalcScript'])

    data = OrderedDict()
    name = json_data.get('name', '')
    if name:
        data['name'] = name
    data['file_path'] = path
    # Preserve the original key insertion order for the optional fields.
    for key, value in (('toversion', json_data.get('toVersion')),
                       ('fromversion', json_data.get('fromVersion')),
                       ('pack', get_pack_name(path))):
        if value:
            data[key] = value
    if associated_types:
        data['incident_types'] = list(associated_types)
    if scripts:
        data['scripts'] = list(scripts)

    return {json_data.get('id'): data}