def test_find_type_no_file(self):
    """
    Given
    - A path that does not point to any existing file.

    When
    - Running find_type.

    Then
    - Ensure None (falsy) is returned.
    """
    nonexistent_path = 'some/path'
    assert not find_type(nonexistent_path)
def copy_playbook_yml(self, path, out_path):
    """Copy a playbook yml into the bundle, ensuring a 'playbook-' filename prefix.

    Adds the "playbook-" prefix to the copy destination filename if the source is
    a (test) playbook and the prefix isn't already present, then copies the file
    and stamps it with a fromversion.

    Args:
        path (str): Source playbook yml path.
        out_path (str): Intended destination path inside the bundle.
    """
    dest_dir_path = os.path.dirname(out_path)
    dest_file_name = os.path.basename(out_path)
    file_type = find_type(path)
    # Only (test) playbooks need the server-recognized 'playbook-' prefix;
    # membership test replaces the duplicated equality checks.
    if not dest_file_name.startswith('playbook-') and \
            file_type in (FileType.PLAYBOOK, FileType.TEST_PLAYBOOK):
        new_name = f'playbook-{dest_file_name}'
        out_path = self.add_suffix_to_file_path(os.path.join(dest_dir_path, new_name))
    shutil.copyfile(path, out_path)
    self.add_from_version_to_yml(out_path)
def __init__(self, input: str = '', output: str = '', path: str = '',
             from_version: str = '', no_validate: bool = False,
             verbose: bool = False, update_docker: bool = False, **kwargs):
    """Initialize the description-file formatter.

    Args:
        input: Path to the '_description.md' file being formatted.
        output: Output path for the formatted file.
        path: Schema path forwarded to the base class.
        from_version: fromversion value forwarded to the base class.
        no_validate: Skip post-format validation when True.
        verbose: Verbose logging flag.
        update_docker: Unused here; accepted for interface compatibility.
    """
    super().__init__(input, output, path, from_version, no_validate,
                     verbose=verbose, **kwargs)
    # The description file shares its base name with the integration yml -
    # derive the yml path so the content type can be determined.
    description_type = input.replace('_description.md', '.yml')
    self.is_beta = False
    file_type = find_type(description_type)
    if file_type:
        # Reuse the already-computed result instead of calling find_type a
        # second time on the same path (the original recomputed it here).
        self.is_beta = file_type.value == 'betaintegration'
    with open(self.source_file, 'r') as f:
        self.description_content = f.read()
def json_output(self, file_path: str, error_code: str, error_message: str,
                warning: bool) -> None:
    """Append a single error record to the JSON output file.

    Args:
        file_path (str): The file path where the error occurred.
        error_code (str): The error code.
        error_message (str): The error message.
        warning (bool): Whether the error is defined as a warning.
    """
    if not self.json_file_path:
        return

    error_data = get_error_object(error_code)

    # Load any previously written records so new errors are appended.
    json_contents = []
    if os.path.exists(self.json_file_path):
        existing_json = get_json(self.json_file_path)
        if existing_json:
            json_contents = existing_json

    file_type = find_type(file_path)
    entity_type = file_type.value if file_type else 'pack'
    # Unified yml image errors (IM codes) are reported against the image entity.
    if entity_type == FileType.INTEGRATION.value and error_code.startswith('IM'):
        entity_type = FileType.IMAGE.value

    json_contents.append({
        'filePath': file_path,
        'fileType': os.path.splitext(file_path)[1].replace('.', ''),
        'entityType': entity_type,
        'errorType': 'Settings',
        'name': get_file_displayed_name(file_path),
        'severity': 'warning' if warning else 'error',
        'errorCode': error_code,
        'message': error_message,
        'ui': error_data.get('ui_applicable'),
        'relatedField': error_data.get('related_field'),
    })

    with open(self.json_file_path, 'w') as f:
        json.dump(json_contents, f, indent=4)
def prepare_single_content_item_for_validation(
        filename: str, data: bytes, tmp_directory: str) -> Tuple[str, Dict]:
    """Write a contributed content item into a temporary pack so it can be validated.

    Creates a 'TmpPack' pack in tmp_directory, normalizes and writes the item into
    the directory matching its filename prefix, and (for yml scripts/integrations)
    extracts the unified yml to package format for validation.

    Args:
        filename: The content item's file name (e.g. 'integration-Foo.yml').
        data: The raw file contents.
        tmp_directory: Directory in which the temporary pack is created.

    Returns:
        Tuple[str, Dict]: The path to validate (the written file, or the extracted
        package directory) and a mapping of extracted code file path to the number
        of rows inserted at the start of the code (empty for json/playbook items).
    """
    content = Content(tmp_directory)
    pack_name = 'TmpPack'
    pack_dir = content.path / 'Packs' / pack_name
    # create pack_metadata.json file in TmpPack
    contrib_converter = ContributionConverter(name=pack_name, base_dir=tmp_directory,
                                              pack_dir_name=pack_name)
    contrib_converter.create_metadata_file({
        'description': 'Temporary Pack',
        'author': 'xsoar'
    })
    # The filename prefix (e.g. 'integration', 'playbook') selects the target dir;
    # unknown prefixes fall back to 'Integrations'.
    prefix = '-'.join(filename.split('-')[:-1])
    containing_dir = pack_dir / ENTITY_TYPE_TO_DIR.get(prefix, 'Integrations')
    containing_dir.mkdir(exist_ok=True)
    is_json = filename.casefold().endswith('.json')
    data_as_string = data.decode()
    # NOTE(review): yaml.load on contributed data - presumably the project's yaml
    # handler uses a safe loader; confirm before trusting untrusted input.
    loaded_data = json.loads(data_as_string) if is_json else yaml.load(data_as_string)
    if is_json:
        data_as_string = json.dumps(loaded_data)
    else:
        # Round-trip through the yaml handler to normalize formatting.
        buff = io.StringIO()
        yaml.dump(loaded_data, buff)
        data_as_string = buff.getvalue()
    # write content item file to file system
    file_path = containing_dir / filename
    file_path.write_text(data_as_string)
    file_type = find_type(str(file_path))
    file_type = file_type.value if file_type else file_type
    # json items and playbooks are validated as-is; no extraction needed.
    if is_json or file_type in (FileType.PLAYBOOK.value, FileType.TEST_PLAYBOOK.value):
        return str(file_path), {}
    extractor = Extractor(input=str(file_path), file_type=file_type,
                          output=containing_dir, no_logging=True, no_pipenv=True,
                          no_basic_fmt=True)
    # validate the resulting package files, ergo set path_to_validate to the package directory that results
    # from extracting the unified yaml to a package format
    extractor.extract_to_package_format()
    code_fp_to_row_offset = {
        get_extracted_code_filepath(extractor): extractor.lines_inserted_at_code_start
    }
    return extractor.get_output_path(), code_fp_to_row_offset
def test_find_only_supported_files(self, valid_spelled_content_pack):
    """
    Given
    - A valid pack directory path.

    When
    - Collecting files to run doc review on.

    Then
    - Ensure every collected file is of a supported file type.
    """
    reviewer = DocReviewer(file_paths=[valid_spelled_content_pack.path])
    reviewer.get_files_to_run_on(file_path=valid_spelled_content_pack.path)
    for collected_file in reviewer.files:
        assert find_type(path=collected_file) in reviewer.SUPPORTED_FILE_TYPES
def run():
    """Scan all content packs and register their untested test playbooks in conf.json.

    For every pack that should be tested and has test playbooks, collects the
    pack's integration data and any test playbooks not already configured in
    conf.json, then appends the generated test configurations to conf.json.
    """
    new_conf_json_objects = []
    existing_test_playbooks = load_test_data_from_conf_json()
    for pack_name in os.listdir(PACKS_DIR):
        pack_path = os.path.join(PACKS_DIR, pack_name)
        if not should_test_content_pack(pack_name):
            continue
        pack_integrations = []
        pack_test_playbooks = []
        integration_dir_path = os.path.join(pack_path, INTEGRATIONS_DIR)
        test_playbook_dir_path = os.path.join(pack_path, TEST_PLAYBOOKS_DIR)
        # Packs without a (non-empty) TestPlaybooks dir have nothing to add.
        if not os.path.isdir(test_playbook_dir_path) or not os.listdir(test_playbook_dir_path):
            continue
        logging.info(f'Going over {pack_name}')
        if os.path.exists(integration_dir_path):
            for file_or_dir in os.listdir(integration_dir_path):
                if os.path.isdir(os.path.join(integration_dir_path, file_or_dir)):
                    # Package-format integration: the yml lives inside its own directory.
                    inner_dir_path = os.path.join(integration_dir_path, file_or_dir)
                    for integration_file in os.listdir(inner_dir_path):
                        is_yml_file = integration_file.endswith('.yml')
                        file_path = os.path.join(inner_dir_path, integration_file)
                        if is_yml_file:
                            pack_integrations.append(get_integration_data(file_path))
                else:
                    # Unified-format integration yml directly under Integrations/.
                    is_yml_file = file_or_dir.endswith('.yml')
                    file_path = os.path.join(integration_dir_path, file_or_dir)
                    if is_yml_file:
                        pack_integrations.append(get_integration_data(file_path))
        for file_path in os.listdir(test_playbook_dir_path):
            is_yml_file = file_path.endswith('.yml')
            file_path = os.path.join(test_playbook_dir_path, file_path)
            if is_yml_file and find_type(file_path) == FileType.TEST_PLAYBOOK:
                test_playbook_id, fromversion = get_playbook_data(file_path)
                # Only register playbooks that aren't already in conf.json.
                if test_playbook_id not in existing_test_playbooks:
                    pack_test_playbooks.append((test_playbook_id, fromversion))
        if pack_test_playbooks:
            new_conf_json_objects.extend(calc_conf_json_object(pack_integrations, pack_test_playbooks))
    add_to_conf_json(new_conf_json_objects)
    logging.info(f'Added {len(new_conf_json_objects)} tests to the conf.json')
    logging.info(f'Added the following objects to the conf.json:\n{json.dumps(new_conf_json_objects, indent=4)}')
def is_valid_structure(self):
    """Check if the structure is valid for the case we are in,
    master - all files, branch - changed files.

    Dispatches to the appropriate validation flow: full validation, git-based
    validation of changed files, or single file / pack validation.

    Returns:
        (bool). Whether the structure is valid or not.
    """
    if self.validate_all:
        self.validate_all_files()
        return self._is_valid
    if self.validate_conf_json:
        if not self.conf_json_validator.is_valid_conf_json():
            self._is_valid = False
    if self.use_git:
        # Feature branch (not master and not a release branch like 19.x/20.x):
        # validate the branch's changed files.
        if self.branch_name != 'master' and (
                not self.branch_name.startswith('19.') and not self.branch_name.startswith('20.')):
            if not self.is_circle:
                print('Validating both committed and non-committed changed files')
            else:
                print('Validating committed changed files only')
            self.validate_committed_files()
        else:
            # On master/release branches, validate against the previous version
            # and run the full schema validation.
            self.validate_against_previous_version(no_error=True)
            print('Validates all of Content repo directories according to their schemas')
            self.validate_all_files_schema()
    else:
        if self.file_path:
            if os.path.isfile(self.file_path):
                print('Not using git, validating file: {}'.format(self.file_path))
                self.is_backward_check = False  # if not using git, no need for BC checks
                self.validate_added_files({self.file_path}, file_type=find_type(self.file_path))
            elif os.path.isdir(self.file_path):
                self.validate_pack()
        else:
            print('Not using git, validating all files.')
            self.validate_all_files_schema()
    return self._is_valid
def validate_readme_exists(self, validate_all: bool = False):
    """Validates if there is a readme file in the same folder as the caller file.

    The validation is processed only on added or modified files.

    Args:
        validate_all: (bool) is the validation being run with -a

    Return:
        True if the readme file exists, False with an error otherwise.

    Note:
        APIModules don't need a readme file (issue 47965).
    """
    if validate_all or API_MODULES_PACK in self.file_path:
        return True
    file_path = os.path.normpath(self.file_path)
    path_split = file_path.split(os.sep)
    file_type = find_type(self.file_path, _dict=self.current_file, file_type='yml')
    # NOTE(review): str.replace below substitutes every occurrence of the target
    # substring in the path - assumes the extension / basename appears only once;
    # verify for unusual paths.
    if file_type == FileType.PLAYBOOK:
        # Playbooks: 'Foo.yml' -> 'Foo_README.md' alongside the playbook.
        to_replace = os.path.splitext(path_split[-1])[-1]
        readme_path = file_path.replace(to_replace, '_README.md')
    elif file_type in {FileType.SCRIPT, FileType.INTEGRATION}:
        if path_split[-2] in ['Scripts', 'Integrations']:
            # Unified format: yml sits directly under Scripts/Integrations.
            to_replace = os.path.splitext(file_path)[-1]
            readme_path = file_path.replace(to_replace, '_README.md')
        else:
            # Package format: README.md lives next to the yml in the package dir.
            to_replace = path_split[-1]
            readme_path = file_path.replace(to_replace, "README.md")
    else:
        # Other file types don't require a readme.
        return True
    if os.path.isfile(readme_path):
        return True
    error_message, error_code = Errors.missing_readme_file(file_type)
    if self.handle_error(error_message, error_code, file_path=self.file_path,
                         suggested_fix=Errors.suggest_fix(self.file_path, cmd="generate-docs")):
        return False
    return True
def copy_test_files(self, test_playbooks_dir=TEST_PLAYBOOKS_DIR):
    """
    Copy test playbook ymls to the test bundle.

    Files directly under test_playbooks_dir get a 'script-'/'playbook-' prefix
    as needed; files inside subdirectories are copied as-is. Copied ymls are
    stamped with a fromversion.

    :param test_playbooks_dir: Directory containing the test playbooks to copy.
    :return: None
    """
    print('Copying test files to test bundle')
    scan_files = glob.glob(os.path.join(test_playbooks_dir, '*'))
    for path in scan_files:
        if os.path.isdir(path):
            # Subdirectory: copy each eligible file without renaming.
            non_circle_tests = glob.glob(os.path.join(path, '*'))
            for new_path in non_circle_tests:
                if os.path.isfile(new_path) and self.should_process_file_to_bundle(
                        new_path, self.test_bundle):
                    print(f'copying path {new_path}')
                    new_file_path = self.add_suffix_to_file_path(
                        os.path.join(self.test_bundle, os.path.basename(new_path)))
                    shutil.copyfile(new_path, new_file_path)
                    if new_file_path.endswith('yml'):
                        self.add_from_version_to_yml(new_file_path)
        else:
            if not self.should_process_file_to_bundle(path, self.test_bundle):
                continue
            # test playbooks in test_playbooks_dir in packs can start without playbook* prefix
            # but when copied to the test_bundle, playbook-* prefix should be added to them
            file_type = find_type(path)
            path_basename = os.path.basename(path)
            if file_type in (FileType.SCRIPT, FileType.TEST_SCRIPT):
                if not path_basename.startswith('script-'):
                    path_basename = f'script-{os.path.basename(path)}'
            elif file_type in (FileType.PLAYBOOK, FileType.TEST_PLAYBOOK):
                if not path_basename.startswith('playbook-'):
                    path_basename = f'playbook-{os.path.basename(path)}'
            print(f'Copying path {path} as {path_basename}')
            new_file_path = self.add_suffix_to_file_path(
                os.path.join(self.test_bundle, path_basename))
            shutil.copyfile(path, new_file_path)
            if new_file_path.endswith('yml'):
                self.add_from_version_to_yml(new_file_path)
def initiate_file_validator(self, validator_type):
    """
    Run schema validate and file validate of file.

    Args:
        validator_type: The content validator class to instantiate for this file.

    Returns:
        int 0 in case of success
        int 1 in case of error
        int 2 in case of skip
    """
    if self.no_validate:
        if self.verbose:
            click.secho(
                f'Validator Skipped on file: {self.output_file} , no-validate flag was set.',
                fg='yellow')
        return SKIP_RETURN_CODE
    else:
        if self.verbose:
            print_color('Starting validating files structure', LOG_COLORS.GREEN)
        # validates only on files in content repo
        if self.relative_content_path:
            file_type = find_type(self.output_file)
            # validates on the output file generated from the format
            structure_validator = StructureValidator(
                self.output_file, predefined_scheme=file_type,
                suppress_print=not self.verbose)
            validator = validator_type(structure_validator, suppress_print=not self.verbose)
            # TODO: remove the connection condition if we implement a specific validator for connections.
            if structure_validator.is_valid_file() and \
                    (file_type == FileType.CONNECTION or validator.is_valid_file(validate_rn=False)):
                if self.verbose:
                    click.secho('The files are valid', fg='green')
                return SUCCESS_RETURN_CODE
            else:
                if self.verbose:
                    click.secho('The files are invalid', fg='red')
                return ERROR_RETURN_CODE
        else:
            # Files outside the content repo cannot be validated - skip.
            if self.verbose:
                click.secho(
                    f'The file {self.output_file} are not part of content repo, Validator Skipped',
                    fg='yellow')
            return SKIP_RETURN_CODE
def get_script_or_integration_package_data(self):
    # should be static method
    """Return the yml path and code of the package in self.package_path.

    Returns:
        Tuple[str, str]: The package's yml file path and its code as a string.

    Raises:
        Exception: If no yml file is found in the package directory.
    """
    _, yml_path = get_yml_paths_in_dir(self.package_path, error_msg='')
    if not yml_path:
        raise Exception(
            f'No yml files found in package path: {self.package_path}. '
            'Is this really a package dir?')
    # Parse the yml once instead of once per branch (the original called
    # get_yaml in both branches).
    yml_data = get_yaml(yml_path)
    if find_type(yml_path) in (FileType.SCRIPT, FileType.TEST_SCRIPT):
        # Scripts keep their code type at the top level of the yml.
        code_type = yml_data.get('type')
    else:
        # Integrations nest the code type under the 'script' key.
        code_type = yml_data.get('script', {}).get('type')
    unifier = Unifier(self.package_path)
    code_path = unifier.get_code_file(TYPE_TO_EXTENSION[code_type])
    with io.open(code_path, 'r', encoding='utf-8') as code_file:
        code = code_file.read()
    return yml_path, code
def validate_pack(self):
    """Validate files in a specified pack"""
    print_color(f'Validating {self.file_path}', LOG_COLORS.GREEN)
    self.validate_pack_unique_files(glob(fr'{os.path.abspath(self.file_path)}'))
    for pack_file in glob(fr'{self.file_path}/**', recursive=True):
        if os.path.isdir(pack_file):
            continue
        # test_data ymls are not content entities - skip them
        if is_test_file(pack_file):
            continue
        # pack_metadata.json is already checked in validate_pack_unique_files()
        if pack_file.endswith('pack_metadata.json'):
            continue
        self.run_all_validations_on_file(pack_file, file_type=find_type(pack_file))
def validate_inputs_examples(input_path):
    """Validate that input_path points at an existing integration yml file.

    Args:
        input_path (str): Path to the input yml file.

    Returns:
        int: 1 on validation failure, 0 on success (both falsy/truthy-compatible
        with the previous implicit-None success return).
    """
    if not input_path:
        print_error(
            'To use the generate_integration_context version of this command please include an `input` argument')
        return 1
    # The redundant 'input_path and' guard was dropped - the early return above
    # already guarantees input_path is truthy here.
    if not os.path.isfile(input_path):
        print_error(F'Input file {input_path} was not found.')
        return 1
    if not input_path.lower().endswith('.yml'):
        print_error(F'Input {input_path} is not a valid yml file.')
        return 1
    file_type = find_type(input_path, ignore_sub_categories=True)
    if file_type is not FileType.INTEGRATION:
        print_error('File is not an Integration.')
        return 1
    # Explicit success value instead of an implicit None.
    return 0
def generate_readme_for_pack_content_item(self, yml_path: str) -> None:
    """Run the demisto-sdk generate-docs flow on a pack content item.

    Generates the README next to the yml and records its path.

    Args:
        yml_path: str: Content item yml path.
    """
    file_type = find_type(yml_path)
    file_type = file_type.value if file_type else file_type
    # The file types are mutually exclusive, so an elif chain is equivalent.
    if file_type == 'integration':
        generate_integration_doc(yml_path)
    elif file_type == 'script':
        generate_script_doc(input_path=yml_path, examples=[])
    elif file_type == 'playbook':
        generate_playbook_doc(yml_path)
    item_dir = os.path.dirname(os.path.realpath(yml_path))
    self.readme_files.append(os.path.join(item_dir, 'README.md'))
def json_output(self, file_path: str, error_code: str, error_message: str, warning: bool) -> None:
    """Adds an error's info to the output JSON file, grouped by file path.

    Args:
        file_path (str): The file path where the error occurred.
        error_code (str): The error code.
        error_message (str): The error message.
        warning (bool): Whether the error is defined as a warning.
    """
    if not self.json_file_path:
        return
    error_data = get_error_object(error_code)
    output = {
        "severity": "warning" if warning else "error",
        "code": error_code,
        "message": error_message,
        "ui": error_data.get('ui_applicable'),
        'related-field': error_data.get('related_field')
    }
    # Load previous content so repeated runs accumulate into one mapping.
    if os.path.exists(self.json_file_path):
        json_contents = get_json(self.json_file_path)
    else:
        json_contents = {}
    file_type = find_type(file_path)
    if file_path in json_contents:
        # NOTE(review): assumes an existing entry always carries an 'outputs'
        # list - .get('outputs') would return None otherwise; confirm entries
        # are only ever written by this method.
        if output in json_contents[file_path].get('outputs'):
            # Skip exact duplicate error entries for the same file.
            return
        json_contents[file_path]['outputs'].append(output)
    else:
        json_contents[file_path] = {
            "file-type": os.path.splitext(file_path)[1].replace('.', ''),
            "entity-type": file_type.value if file_type else 'pack',
            "display-name": get_file_displayed_name(file_path),
            "outputs": [output]
        }
    with open(self.json_file_path, 'w') as f:
        json.dump(json_contents, f, indent=4)
def find_dashboard_by_id(self, dashboard_id: str) -> Optional[Dict]:
    """
    Search the pack's Dashboards directory for a dashboard with the given id.

    Args:
        dashboard_id: dashboard id to search for

    Returns:
        if found - the content of the dashboard, else - None.
    """
    dashboards_dir_path = f'{self.pack_path}/Dashboards/'
    for file_name in os.listdir(dashboards_dir_path):
        file_path = os.path.join(dashboards_dir_path, file_name)
        # Skip anything that isn't a dashboard file.
        if find_type(file_path) != FileType.DASHBOARD:
            continue
        with open(file_path) as f:
            dashboard = json.load(f)
        if dashboard.get('id') == dashboard_id:
            # the searched dashboard was found
            return dashboard
    return None
def get_changed_file_name_and_type(
        self, file_path) -> Tuple[str, Optional[FileType]]:
    """
    Gets the changed file name and type.

    :param file_path: The file path

    :rtype: ``str, FileType``
    :return The changed file name and type
    """
    changed_type: Optional[FileType] = None
    changed_name = 'N/A'
    # Only resolve files that belong to this pack, excluding READMEs.
    if self.pack + '/' in file_path and ('README' not in file_path):
        yml_path = self.find_corresponding_yml(file_path)
        changed_name = self.get_display_name(yml_path)
        changed_type = find_type(yml_path)
    return changed_name, changed_type
def process_classifier(file_path: str, print_logs: bool) -> list:
    """
    Process a classifier JSON file.

    Args:
        file_path: The file path from Classifiers folder
        print_logs: Whether to print logs to stdout

    Returns:
        a list of classifier data.
    """
    res: list = []
    try:
        # Only (old or new format) classifiers are collected.
        if find_type(file_path) not in (FileType.CLASSIFIER, FileType.OLD_CLASSIFIER):
            return res
        if print_logs:
            print(f'adding {file_path} to id_set')
        res.append(get_classifier_data(file_path))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def update_tests(self) -> None:
    """
    If there are no tests configured: collect test playbooks from the pack's
    TestPlaybooks dir; if none exist, prompt the user (or assume yes) whether
    to set 'tests' to 'No tests', and format the file accordingly.
    """
    if not self.data.get('tests', ''):
        # try to get the test playbook files from the TestPlaybooks dir in the pack
        pack_path = os.path.dirname(os.path.dirname(os.path.abspath(self.source_file)))
        test_playbook_dir_path = os.path.join(pack_path, TEST_PLAYBOOKS_DIR)
        test_playbook_ids = []
        try:
            test_playbooks_files = os.listdir(test_playbook_dir_path)
            if test_playbooks_files:
                for file_path in test_playbooks_files:  # iterate over the test playbooks in the dir
                    is_yml_file = file_path.endswith('.yml')
                    # concat as we might not be in content repo
                    tpb_file_path = os.path.join(test_playbook_dir_path, file_path)
                    if is_yml_file and find_type(tpb_file_path) == FileType.TEST_PLAYBOOK:
                        test_playbook_data = get_yaml(tpb_file_path)
                        test_playbook_id = get_entity_id_by_entity_type(
                            test_playbook_data, content_entity='')
                        test_playbook_ids.append(test_playbook_id)
                self.data['tests'] = test_playbook_ids
        except FileNotFoundError:
            # No TestPlaybooks dir - fall through to the 'No tests' prompt.
            pass
        if not test_playbook_ids:
            # In case no_interactive flag was given - modify the tests without confirmation
            if self.assume_yes:
                should_modify_yml_tests = True
            else:
                should_modify_yml_tests = click.confirm(
                    f'The file {self.source_file} has no test playbooks '
                    f'configured. Do you want to configure it with "No tests"?')
            if should_modify_yml_tests:
                click.echo(f'Formatting {self.output_file} with "No tests"')
                self.data['tests'] = ['No tests (auto formatted)']
def process_indicator_fields(file_path: str, print_logs: bool) -> list:
    """
    Process an indicator fields JSON file.

    Args:
        file_path: The file path from indicator field folder
        print_logs: Whether to print logs to stdout

    Returns:
        a list of indicator field data.
    """
    res: list = []
    try:
        if find_type(file_path) != FileType.INDICATOR_FIELD:
            # Not an indicator field - nothing to collect.
            return res
        if print_logs:
            print(f'adding {file_path} to id_set')
        res.append(get_general_data(file_path))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def process_dashboards(file_path: str, print_logs: bool) -> list:
    """
    Process a dashboard JSON file.

    Args:
        file_path: The file path from Dashboard folder
        print_logs: Whether to print logs to stdout

    Returns:
        a list of dashboard data.
    """
    res: list = []
    try:
        if find_type(file_path) != FileType.DASHBOARD:
            # Not a dashboard - nothing to collect.
            return res
        if print_logs:
            print(f'adding {file_path} to id_set')
        res.append(get_general_data(file_path))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def process_script(file_path: str, print_logs: bool) -> list:
    """
    Process a script into id_set data.

    Args:
        file_path: Path of a unified script yml file, or a script package dir.
        print_logs: Whether to print logs to stdout.

    Returns:
        a list of script data (empty if the file isn't a script).
    """
    res: list = []
    try:
        if not os.path.isfile(file_path):
            # package script - unify it to obtain the yml and its code.
            unifier = Unifier(file_path)
            yml_path, code = unifier.get_script_or_integration_package_data()
            if print_logs:
                print(f'adding {file_path} to id_set')
            res.append(get_script_data(yml_path, script_code=code))
        elif find_type(file_path) == FileType.SCRIPT:
            if print_logs:
                print(f'adding {file_path} to id_set')
            res.append(get_script_data(file_path))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def validate_all_files(self):
    """Validate conf.json, all packs' unique files and every content file."""
    print('Validating all files')
    print('Validating conf.json')
    if not ConfJsonValidator().is_valid_conf_json():
        self._is_valid = False
    packs = {os.path.basename(pack) for pack in glob(f'{PACKS_DIR}/*')}
    self.validate_pack_unique_files(packs)
    # Collect every file (not directory) under the content directories.
    files_to_validate = set()
    for content_dir in (PACKS_DIR, BETA_INTEGRATIONS_DIR, TEST_PLAYBOOKS_DIR):
        files_to_validate.update(
            path for path in glob(fr'{content_dir}/**', recursive=True)
            if not os.path.isdir(path))
    print('Validating all Pack and Beta Integration files')
    for path in files_to_validate:
        self.run_all_validations_on_file(path, file_type=find_type(path))
def filter_staged_only(modified_files, added_files, old_format_files, changed_meta_files):
    """The function gets sets of files which were changed in the current branch
    and filters out only the files that were changed in the current commit."""
    staged = set()
    for changed_file in run_command('git diff --name-only --staged').split():
        # Code files are validated through their corresponding yml file.
        if find_type(changed_file) in (FileType.POWERSHELL_FILE, FileType.PYTHON_FILE):
            changed_file = os.path.splitext(changed_file)[0] + '.yml'
        staged.add(changed_file)
    return (modified_files.intersection(staged),
            added_files.intersection(staged),
            old_format_files.intersection(staged),
            changed_meta_files.intersection(staged))
def get_display_name(file_path) -> str:
    """
    Gets the display name from the content file, checking known name-bearing
    keys in priority order and falling back to the file's base name.

    :param file_path: The pack yml file path

    :rtype: ``str``
    :return The display name
    """
    struct = StructureValidator(file_path=file_path, is_new_file=True,
                                predefined_scheme=find_type(file_path))
    file_data = struct.load_data_from_file()
    # The branch order below is significant: e.g. integrations have both
    # 'display' and 'name', and 'display' must win.
    if 'display' in file_data:
        name = file_data.get('display', None)
    elif 'layout' in file_data and isinstance(file_data['layout'], dict):
        # Old-format layouts nest their identifier under 'layout'.
        name = file_data['layout'].get('id')
    elif 'name' in file_data:
        name = file_data.get('name', None)
    elif 'TypeName' in file_data:
        name = file_data.get('TypeName', None)
    elif 'brandName' in file_data:
        name = file_data.get('brandName', None)
    elif 'id' in file_data:
        name = file_data.get('id', None)
    elif 'trigger_name' in file_data:
        name = file_data.get('trigger_name')
    elif 'dashboards_data' in file_data and file_data.get('dashboards_data') \
            and isinstance(file_data['dashboards_data'], list):
        # Dashboard bundles: take the first dashboard's name.
        dashboard_data = file_data.get('dashboards_data', [{}])[0]
        name = dashboard_data.get('name')
    elif 'templates_data' in file_data and file_data.get('templates_data') \
            and isinstance(file_data['templates_data'], list):
        # Report templates: take the first template's report name.
        r_name = file_data.get('templates_data', [{}])[0]
        name = r_name.get('report_name')
    else:
        # No recognized name key - fall back to the file's base name.
        name = os.path.basename(file_path)
    return name
def process_incident_fields(file_path: str, print_logs: bool, incidents_types_list: list) -> list:
    """
    Process an incident fields JSON file.

    Args:
        file_path: The file path from incident field folder
        print_logs: Whether to print logs to stdout.
        incidents_types_list: List of all the incident types in the system.

    Returns:
        a list of incident field data.
    """
    res: list = []
    try:
        if find_type(file_path) != FileType.INCIDENT_FIELD:
            # Not an incident field - nothing to collect.
            return res
        if print_logs:
            print(f'adding {file_path} to id_set')
        res.append(get_incident_field_data(file_path, incidents_types_list))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def process_layoutscontainer(file_path: str, print_logs: bool) -> list:
    """
    Process a Layouts_Container JSON file.

    Args:
        file_path: The file path from layout folder
        print_logs: Whether to print logs to stdout

    Returns:
        a list of layout data.
    """
    res: list = []
    try:
        if find_type(file_path) != FileType.LAYOUTS_CONTAINER:
            # Not a layouts container - nothing to collect.
            return res
        if print_logs:
            print(f'adding {file_path} to id_set')
        res.append(get_layoutscontainer_data(file_path))
    except Exception as exp:  # noqa
        print_error(f'failed to process {file_path}, Error: {str(exp)}')
        raise
    return res
def content_item_to_package_format(
        self, content_item_dir: str, del_unified: bool = True,
        source_mapping: Optional[Dict] = None  # noqa: F841
) -> None:
    """Split unified yml content items in a directory into package format.

    Args:
        content_item_dir: Directory containing the content item files.
        del_unified: Whether to delete the original unified yml after splitting.
        source_mapping: Unused; kept for interface compatibility.
    """
    child_files = get_child_files(content_item_dir)
    for child_file in child_files:
        cf_name_lower = os.path.basename(child_file).lower()
        # Only unified script/automation/integration ymls are split.
        if cf_name_lower.startswith((SCRIPT, AUTOMATION, INTEGRATION)) and cf_name_lower.endswith('yml'):
            content_item_file_path = child_file
            file_type = find_type(content_item_file_path)
            file_type = file_type.value if file_type else file_type
            try:
                extractor = Extractor(
                    input=content_item_file_path, file_type=file_type,
                    output=content_item_dir, no_logging=True, no_pipenv=True)
                extractor.extract_to_package_format()
            except Exception as e:
                # Collect errors instead of aborting the whole conversion.
                err_msg = f'Error occurred while trying to split the unified YAML "{content_item_file_path}" ' \
                          f'into its component parts.\nError: "{e}"'
                self.contrib_conversion_errs.append(err_msg)
            # NOTE(review): the unified file is removed even when extraction
            # failed above - confirm this is the intended behavior.
            if del_unified:
                os.remove(content_item_file_path)
def generate_pack_tests_configuration(pack_name, existing_test_playbooks):
    """Collect a single pack's integrations and its not-yet-configured test playbooks.

    Args:
        pack_name: Name of the pack directory under PACKS_DIR.
        existing_test_playbooks: Test playbook ids already configured in conf.json.

    Returns:
        Tuple of (pack integrations data, new (test_playbook_id, fromversion)
        pairs, pack_name). Both lists are empty when the pack has no test
        playbooks directory.
    """
    install_logging('Update_Tests_step.log', include_process_name=True)
    pack_integrations = []
    pack_test_playbooks = []
    pack_path = os.path.join(PACKS_DIR, pack_name)
    integration_dir_path = os.path.join(pack_path, INTEGRATIONS_DIR)
    test_playbook_dir_path = os.path.join(pack_path, TEST_PLAYBOOKS_DIR)
    # Packs without a (non-empty) TestPlaybooks dir have nothing to collect.
    if not os.path.isdir(test_playbook_dir_path) or not os.listdir(test_playbook_dir_path):
        return pack_integrations, pack_test_playbooks, pack_name
    logging.info(f'Going over {pack_name}')
    if os.path.exists(integration_dir_path):
        for file_or_dir in os.listdir(integration_dir_path):
            if os.path.isdir(os.path.join(integration_dir_path, file_or_dir)):
                # Package-format integration: the yml lives inside its own directory.
                inner_dir_path = os.path.join(integration_dir_path, file_or_dir)
                for integration_file in os.listdir(inner_dir_path):
                    is_yml_file = integration_file.endswith('.yml')
                    file_path = os.path.join(inner_dir_path, integration_file)
                    if is_yml_file:
                        pack_integrations.append(get_integration_data(file_path))
            else:
                # Unified-format integration yml directly under Integrations/.
                is_yml_file = file_or_dir.endswith('.yml')
                file_path = os.path.join(integration_dir_path, file_or_dir)
                if is_yml_file:
                    pack_integrations.append(get_integration_data(file_path))
    for file_path in os.listdir(test_playbook_dir_path):
        is_yml_file = file_path.endswith('.yml')
        file_path = os.path.join(test_playbook_dir_path, file_path)
        if is_yml_file and find_type(file_path) == FileType.TEST_PLAYBOOK:
            test_playbook_id, fromversion = get_playbook_data(file_path)
            # Only report playbooks not already configured in conf.json.
            if test_playbook_id not in existing_test_playbooks:
                pack_test_playbooks.append((test_playbook_id, fromversion))
    return pack_integrations, pack_test_playbooks, pack_name