def _print_final_results(self, good_pkgs: List[str], fail_pkgs: List[str]) -> int:
    """Print a colored summary of a parallel lint run.

    Args:
        good_pkgs (list): Packages that passed lint.
        fail_pkgs (list): Packages that failed lint.

    Returns:
        int. 0 on success and 1 if any package failed.
    """
    if fail_pkgs:
        failed = "\n\t".join(fail_pkgs)
        print_color("\n******* FAIL PKGS: *******", LOG_COLORS.RED)
        print_color(f"\n\t{failed}\n", LOG_COLORS.RED)
    if good_pkgs:
        passed = "\n\t".join(good_pkgs)
        print_color("\n******* SUCCESS PKGS: *******", LOG_COLORS.GREEN)
        print_color(f"\n\t{passed}\n", LOG_COLORS.GREEN)
    if not good_pkgs and not fail_pkgs:
        print_color("\n******* No changed packages found *******\n", LOG_COLORS.YELLOW)
    # Any failing package fails the whole run.
    return 1 if fail_pkgs else 0
def upload(self):
    """Upload the integration specified in self.infile to the remote Demisto instance.

    Returns:
        int. 0 on success; 1 when the path is not a valid package directory.

    Raises:
        Whatever the client raises on upload failure (propagated unchanged).
    """
    try:
        if self.unify:
            # Create a temporary unified yml file
            try:
                unifier = Unifier(self.path, outdir=self.path)
                self.path = unifier.merge_script_package_to_yml()[0][0]
            except IndexError:
                print_color(
                    'Error: Path input is not a valid package directory.',
                    LOG_COLORS.RED)
                return 1
        # Upload the file to Demisto
        result = self.client.integration_upload(file=self.path)
        # Print results
        print_v(f'Result:\n{result.to_str()}', self.log_verbose)
        print_color(f'Uploaded \'{result.name}\' successfully', LOG_COLORS.GREEN)
    # Removed the pointless `except Exception as ex: raise ex` clause - it
    # re-raised every exception unchanged and only obscured the traceback.
    finally:
        if self.unify and os.path.exists(self.path):
            # Remove the temporary file
            os.remove(self.path)
    return 0
def run_mypy(self, py_num) -> int:
    """Runs mypy

    Args:
        py_num: The python version in use

    Returns:
        int. 0 on successful mypy run, 1 otherwise.
    """
    self.get_common_server_python()
    lint_files = self._get_lint_files()
    sys.stdout.flush()
    script_path = os.path.abspath(
        os.path.join(self.configuration.sdk_env_dir, self.run_mypy_script))
    output = run_command(' '.join(
        ['bash', script_path, str(py_num), lint_files]), cwd=self.project_dir)
    self.lock.acquire()
    try:
        print("========= Running mypy on: {} ===============".format(lint_files))
        # BUG FIX: mypy reports 'Success: no issues found in N source files';
        # matching '... in 1 source file' made every clean multi-file run
        # (e.g. with CommonServerPython added) look like a failure.
        if 'Success: no issues found' in output:
            print(output)
            print_color("mypy completed for: {}\n".format(lint_files),
                        LOG_COLORS.GREEN)
            return 0
        print_error(output)
        return 1
    finally:
        # Cleanup always runs, even if printing raises.
        self.remove_common_server_python()
        if self.lock.locked():
            self.lock.release()
def run_bandit(self, py_num) -> int:
    """Run bandit

    Args:
        py_num: The python version in use

    Returns:
        int. 0 on successful bandit run, 1 otherwise.
    """
    lint_files = self._get_lint_files()
    python_exe = 'python2' if py_num < 3 else 'python3'
    cmd = [python_exe, '-m', 'bandit', '-lll', '-iii', '-q', lint_files]
    output = run_command(' '.join(cmd), cwd=self.project_dir)
    self.lock.acquire()
    try:
        print("========= Running bandit on: {} ===============".format(
            lint_files))
        print_v('Using: {} to run bandit'.format(python_exe))
        # bandit is quiet (-q): any output at all means findings.
        if output:
            print_error(output)
            return 1
        print_color("bandit completed for: {}\n".format(lint_files),
                    LOG_COLORS.GREEN)
        return 0
    finally:
        if self.lock.locked():
            self.lock.release()
def _run_query(self, playground_id: str):
    """Run self.query on a Demisto playground and print each result entry.

    Args:
        playground_id: The investigation ID of the playground.

    Returns:
        list. The log IDs if debug mode is on, otherwise an empty list.
    """
    entries = self.client.investigation_add_entries_sync(
        update_entry={'investigationId': playground_id, 'data': self.query})
    debug_log_ids = []
    for entry in entries:
        # Entries with `parent_content` carry the command that was issued.
        if entry.parent_content:
            print_color('### Command:', LOG_COLORS.YELLOW)
            print(entry.parent_content)
        # Entries with `contents` carry the readable output of the command.
        if entry.contents:
            print_color('## Readable Output', LOG_COLORS.YELLOW)
            emit = print_error if entry.type == ERROR_ENTRY_TYPE else print
            emit(entry.contents + '\n')
        # Debug-file entries reference the fileID of the debug log artifact.
        if entry.type == DEBUG_FILE_ENTRY_TYPE:
            debug_log_ids.append(entry.id)
    return debug_log_ids
def run_flake8(self, py_num) -> int:
    """Runs flake8

    Args:
        py_num (int): The python version in use

    Returns:
        int. 0 if flake8 is successful, 1 otherwise.
    """
    lint_files = self._get_lint_files()
    python_exe = 'python2' if py_num < 3 else 'python3'
    print_v('Using: {} to run flake8'.format(python_exe))
    output = run_command(f'{python_exe} -m flake8 {self.project_dir}',
                         cwd=self.configuration.env_dir)
    self.lock.acquire()
    try:
        print("\n========= Running flake8 on: {}===============".format(
            lint_files))
        # flake8 prints nothing when the code is clean.
        if output:
            print_error(output)
            return 1
        print_color("flake8 completed for: {}\n".format(lint_files),
                    LOG_COLORS.GREEN)
        return 0
    finally:
        if self.lock.locked():
            self.lock.release()
def pack_init(self) -> bool: """Creates a pack directory tree. Returns: bool. Returns True if pack was created successfully and False otherwise """ # if an output directory given create the pack there if len(self.output_dir) > 0: self.full_output_path = os.path.join(self.output_dir, self.dir_name) # content-descriptor file indicates we are in "content" repository # thus we will create the pack under Packs directory elif os.path.isfile('content-descriptor.json'): self.full_output_path = os.path.join("Packs", self.dir_name) # if non of the above conditions apply - create the pack in current directory else: self.full_output_path = self.dir_name if not self.create_new_directory(): return False for directory in self.DIR_LIST: path = os.path.join(self.full_output_path, directory) os.mkdir(path=path) with open(os.path.join(self.full_output_path, 'CHANGELOG.md'), 'a') as fp: fp.write("## [Unreleased]") fp = open(os.path.join(self.full_output_path, 'README.md'), 'a') fp.close() with open(os.path.join(self.full_output_path, 'metadata.json'), 'a') as fp: # TODO fill this once metadata.json script is ready fp.write('[]') fp = open(os.path.join(self.full_output_path, '.secrets-ignore'), 'a') fp.close() fp = open(os.path.join(self.full_output_path, '.pack-ignore'), 'a') fp.close() print_color( f"Successfully created the pack {self.dir_name} in: {self.full_output_path}", LOG_COLORS.GREEN) create_integration = str( input("\nDo you want to create an integration in the pack? Y/N ") ).lower() if create_integration in ['y', 'yes']: integration_init = Initiator(output_dir=os.path.join( self.full_output_path, 'Integrations'), integration=True) return integration_init.init() return True
def run_playbook(self):
    # type: () -> int
    """Run a playbook in Demisto.

    Returns:
        int. 0 in success, 1 in a failure.
    """
    # create an incident with the given playbook
    try:
        incident_id = self.create_incident_with_playbook(
            incident_name=f'inc_{self.playbook_id}',
            playbook_id=self.playbook_id)
    except ApiException as a:
        print_error(str(a))
        return 1
    work_plan_link = self.base_link_to_workplan + str(incident_id)
    if self.should_wait:
        # BUG FIX: the original called builtin print() with LOG_COLORS.GREEN as
        # a second positional argument, printing the escape code as text.
        print_color(
            f'Waiting for the playbook to finish running.. \n'
            f'To see the playbook run in real-time please go to : {work_plan_link}',
            LOG_COLORS.GREEN)
        elapsed_time = 0
        start_time = time.time()
        playbook_results = None  # set on the first poll below
        while elapsed_time < self.timeout:
            playbook_results = self.get_playbook_results_dict(incident_id)
            if playbook_results["state"] == "inprogress":
                time.sleep(10)
                elapsed_time = int(time.time() - start_time)
            else:  # the playbook has finished running
                break
        # Ended the loop because of timeout
        if elapsed_time >= self.timeout:
            print_error(
                f'The command had timed out while the playbook is in progress.\n'
                f'To keep tracking the playbook please go to : {work_plan_link}'
            )
        else:
            if playbook_results["state"] == "failed":
                print_error(
                    "The playbook finished running with status: FAILED")
            else:
                print_color(
                    "The playbook has completed its run successfully",
                    LOG_COLORS.GREEN)
    # The command does not wait for the playbook to finish running
    else:
        print(f'To see results please go to : {work_plan_link}')
    return 0
def validate_all_files(self): """Validate all files in the repo are in the right format.""" # go over packs for root, dirs, _ in os.walk(PACKS_DIR): for dir_in_dirs in dirs: for directory in PACKS_DIRECTORIES: for inner_root, inner_dirs, files in os.walk( os.path.join(root, dir_in_dirs, directory)): for inner_dir in inner_dirs: if inner_dir.startswith('.'): continue project_dir = os.path.join(inner_root, inner_dir) _, file_path = get_yml_paths_in_dir( os.path.normpath(project_dir), Errors.no_yml_file(project_dir)) if file_path: print("Validating {}".format(file_path)) structure_validator = StructureValidator( file_path) if not structure_validator.is_valid_scheme(): self._is_valid = False # go over regular content entities for directory in DIR_LIST_FOR_REGULAR_ENTETIES: print_color('Validating {} directory:'.format(directory), LOG_COLORS.GREEN) for root, dirs, files in os.walk(directory): for file_name in files: file_path = os.path.join(root, file_name) # skipping hidden files if not file_name.endswith('.yml'): continue print('Validating ' + file_name) structure_validator = StructureValidator(file_path) if not structure_validator.is_valid_scheme(): self._is_valid = False # go over regular PACKAGE_SUPPORTING_DIRECTORIES entities for directory in PACKAGE_SUPPORTING_DIRECTORIES: for root, dirs, files in os.walk(directory): for inner_dir in dirs: if inner_dir.startswith('.'): continue project_dir = os.path.join(root, inner_dir) _, file_path = get_yml_paths_in_dir( project_dir, Errors.no_yml_file(project_dir)) if file_path: print('Validating ' + file_path) structure_validator = StructureValidator(file_path) if not structure_validator.is_valid_scheme(): self._is_valid = False
def merge_script_package_to_yml(self):
    """Merge the various components to create an output yml file

    Returns:
        tuple: (list of output file paths, yml_path, script_path,
        image_path, desc_path) - image/desc paths are None for scripts.
    """
    print("Merging package: {}".format(self.package_path))
    if self.package_path.endswith('/'):
        self.package_path = self.package_path.rstrip('/')
    package_dir_name = os.path.basename(self.package_path)
    output_filename = '{}-{}.yml'.format(DIR_TO_PREFIX[self.dir_name],
                                         package_dir_name)
    if self.dest_path:
        self.dest_path = os.path.join(self.dest_path, output_filename)
    else:
        self.dest_path = os.path.join(self.dir_name, output_filename)
    yml_paths, yml_path = get_yml_paths_in_dir(
        self.package_path, Errors.no_yml_file(self.package_path))
    for path in yml_paths:
        # The plugin creates a unified YML file for the package.
        # In case this script runs locally and there is a unified YML file in the package we need to ignore it.
        # Also,
        # we don't take the unified file by default because
        # there might be packages that were not created by the plugin.
        if 'unified' not in path:
            yml_path = path
            break
    with open(yml_path, 'r') as yml_file:
        yml_data = yaml.safe_load(yml_file)
    script_obj = yml_data
    # Integrations nest the script object under the 'script' key.
    if self.dir_name != SCRIPTS_DIR:
        script_obj = yml_data['script']
    script_type = TYPE_TO_EXTENSION[script_obj['type']]
    # Re-read as raw text: the code/image/description are spliced in textually
    # to preserve the original YAML formatting.
    with io.open(yml_path, mode='r', encoding='utf-8') as yml_file:
        yml_text = yml_file.read()
    yml_text, script_path = self.insert_script_to_yml(
        script_type, yml_text, yml_data)
    image_path = None
    desc_path = None
    if self.dir_name in (INTEGRATIONS_DIR, BETA_INTEGRATIONS_DIR):
        yml_text, image_path = self.insert_image_to_yml(yml_data, yml_text)
        yml_text, desc_path = self.insert_description_to_yml(
            yml_data, yml_text)
    output_map = self.write_yaml_with_docker(yml_text, yml_data, script_obj)
    unifier_outputs = list(
        output_map.keys()), yml_path, script_path, image_path, desc_path
    print_color("Created unified yml: {}".format(unifier_outputs[0][0]),
                LOG_COLORS.GREEN)
    return unifier_outputs
def update_yml(self):
    """Manager function for the generic YML updates."""
    print_color(
        f'=======Starting updates for YML: {self.source_file}=======',
        LOG_COLORS.YELLOW)
    # Run each generic update step in order.
    for step in (self.remove_copy_and_dev_suffixes_from_name,
                 self.update_id_to_equal_name,
                 self.set_version_to_default):
        step()
    print_color(
        f'=======Finished generic updates for YML: {self.output_file_name}=======',
        LOG_COLORS.YELLOW)
def _export_debug_log(self, log_ids: list):
    """Retrieve & re-export debug mode log files

    Args:
        log_ids (list): artifact ids of the log files
    """
    if not self.debug_path:
        # No export path configured: pretty-print the logs to stdout.
        print_color('## Detailed Log', LOG_COLORS.YELLOW)
        for log_id in log_ids:
            # download_file returns a local path to the fetched artifact.
            result = self.client.download_file(log_id)
            with open(result, 'r+') as log_info:
                for line in log_info:
                    if self.SECTIONS_HEADER_REGEX.match(line):
                        print_color(line, LOG_COLORS.YELLOW)
                    elif self.FULL_LOG_REGEX.match(line):
                        print_color('Full Integration Log:', LOG_COLORS.YELLOW)
                    else:
                        print(line)
    else:
        # Export path configured: concatenate all logs into one binary file.
        with open(self.debug_path, 'w+b') as output_file:
            for log_id in log_ids:
                result = self.client.download_file(log_id)
                with open(result, 'r+') as log_info:
                    for line in log_info:
                        output_file.write(line.encode('utf-8'))
        print_color(
            f'Debug Log successfully exported to {self.debug_path}',
            LOG_COLORS.GREEN)
def find_secrets(self):
    """Scan the current branch for secrets unless it is a forked (external) PR.

    Returns True when secrets were found, False when the scan ran clean.
    Forked branches are skipped entirely (implicit None return, as before).
    """
    print_color('Starting secrets detection', LOG_COLORS.GREEN)
    branch_name = self.get_branch_name()
    # External (forked) PR branches are not scanned.
    if re.match(EXTERNAL_PR_REGEX, branch_name) is not None:
        return
    if self.get_secrets(branch_name, self.is_circle):
        return True
    print_color('Finished validating secrets, no secrets were found.',
                LOG_COLORS.GREEN)
    return False
def validate_against_previous_version(self, no_error=False):
    """Validate all files that were changed between previous version and branch_sha

    Args:
        no_error (bool): If set to true will restore self._is_valid after run
            (will not return new errors)
    """
    # Nothing to compare against when no previous version is set or it is master.
    if not self.prev_ver or self.prev_ver == 'master':
        return
    print_color('Starting validation against {}'.format(self.prev_ver),
                LOG_COLORS.GREEN)
    modified_files, _, _, _ = self.get_modified_and_added_files(self.prev_ver)
    prev_self_valid = self._is_valid
    self.validate_modified_files(modified_files)
    if no_error:
        # Discard any new errors raised by this pass.
        self._is_valid = prev_self_valid
def format_file(self):
    """Manager function for the script YML updater."""
    super().update_yml()
    print_color(
        '========Starting updates for script: {}======='.format(self.source_file),
        LOG_COLORS.YELLOW)
    self.save_yml_to_destination_file()
    print_color(
        '========Finished updates for script: {}======='.format(self.output_file_name),
        LOG_COLORS.YELLOW)
    # Validate the formatted output and propagate the validator's exit code.
    return self.initiate_file_validator(ScriptValidator, 'script')
def json_to_outputs(command, infile, prefix, outfile=None, verbose=False,
                    interactive=False):
    """Parse a JSON API response into Demisto outputs YAML format.

    Args:
        command: name of the command this output belongs to, e.g. xdr-get-incidents.
        infile: full path to a valid JSON file with the service's API response.
        prefix: context prefix prepended to each output field, e.g. VirusTotal.IP.
        outfile: full path of the file to save the YAML to (stdout when falsy).
        verbose: used for debugging purposes - more logs, re-raise errors.
        interactive: when True, prompt the user for each output description
            (all descriptions are empty by default).
    """
    try:
        if infile:
            with open(infile, 'r') as json_file:
                input_json = json_file.read()
        else:
            # No input file: read the JSON interactively from stdin.
            print("Dump your JSON here:")
            input_json = input_multiline()
        yaml_output = parse_json(input_json, command, prefix, verbose,
                                 interactive)
        if not outfile:
            print_color("YAML Outputs\n\n", LOG_COLORS.GREEN)
            print(yaml_output)
        else:
            with open(outfile, 'w') as yf:
                yf.write(yaml_output)
            print_color(f'Outputs file was saved to :\n{outfile}',
                        LOG_COLORS.GREEN)
    except Exception as ex:
        if verbose:
            raise
        print_error(f'Error: {str(ex)}')
        sys.exit(1)
def __init__(self, source_file='', output_file_name=''):
    """Load the YML at source_file and locate its id/version path object.

    Exits the process (code 1) when no source is given or the YML is invalid.
    """
    self.source_file = source_file
    # A source path is mandatory.
    if not self.source_file:
        print_color(
            'Please provide <source path>, <optional - destination path>.',
            LOG_COLORS.RED)
        sys.exit(1)
    try:
        self.yml_data = self.get_yml_data_as_dict()
    except yaml.YAMLError:
        print_color('Provided file is not a valid YML.', LOG_COLORS.RED)
        sys.exit(1)
    self.output_file_name = self.set_output_file_name(output_file_name)
    self.id_and_version_location = self.get_id_and_version_path_object()
def format_file(self):
    """Manager function for the integration YML updater."""
    super().update_yml()
    print_color(
        '========Starting updates for integration: {}======='.format(self.source_file),
        LOG_COLORS.YELLOW)
    # Integration-specific fixups before saving.
    self.update_proxy_insecure_param_to_default()
    self.set_reputation_commands_basic_argument_as_needed()
    self.save_yml_to_destination_file()
    print_color(
        '========Finished updates for integration: {}======='.format(self.output_file_name),
        LOG_COLORS.YELLOW)
    return self.initiate_file_validator(IntegrationValidator, 'integration')
def format_file(self):
    """Manager function for the playbook YML updater."""
    super().update_yml()
    print_color(
        '========Starting updates for playbook: {}======='.format(self.source_file),
        LOG_COLORS.YELLOW)
    # Playbook-specific fixups before saving.
    for step in (self.add_description,
                 self.update_playbook_task_name,
                 self.update_fromversion):
        step()
    self.save_yml_to_destination_file()
    print_color(
        '========Finished updates for playbook: {}======='.format(self.output_file_name),
        LOG_COLORS.YELLOW)
    return self.initiate_file_validator(PlaybookValidator, 'playbook')
def integration_init(self) -> bool: """Creates a new integration according to a template. Returns: bool. True if the integration was created successfully, False otherwise. """ # if output directory given create the integration there if len(self.output_dir) > 0: self.full_output_path = os.path.join(self.output_dir, self.dir_name) # will create the integration under the Integrations directory of the pack elif os.path.isdir(INTEGRATIONS_DIR): self.full_output_path = os.path.join('Integrations', self.dir_name) # if non of the conditions above apply - create the integration in the local directory else: self.full_output_path = self.dir_name if not self.create_new_directory(): return False hello_world_path = os.path.normpath( os.path.join(__file__, "..", "..", 'common', 'templates', self.HELLO_WORLD_INTEGRATION)) copy_tree(str(hello_world_path), self.full_output_path) if self.id != self.HELLO_WORLD_INTEGRATION: # note rename does not work on the yml file - that is done in the yml_reformatting function. self.rename(current_suffix=self.HELLO_WORLD_INTEGRATION) self.yml_reformatting(current_suffix=self.HELLO_WORLD_INTEGRATION) self.fix_test_file_import( name_to_change=self.HELLO_WORLD_INTEGRATION) print_color(f"Finished creating integration: {self.full_output_path}.", LOG_COLORS.GREEN) return True
def update_fromversion(self):
    """If no fromversion is specified, asks the user for its value and updates the playbook."""
    print('Updating fromversion tag')  # constant message - f-string prefix dropped
    if not self.yml_data.get('fromversion', ''):
        print_color(
            'No fromversion is specified for this playbook, would you like me to update for you? [Y/n]',
            LOG_COLORS.RED)
        user_answer = input()
        if user_answer in ['n', 'N', 'no', 'No']:
            print_error('Moving forward without updating fromversion tag')
            return
        is_input_version_valid = False
        while not is_input_version_valid:
            print_color('Please specify the desired version X.X.X',
                        LOG_COLORS.YELLOW)
            user_desired_version = input()
            # BUG FIX: re.match only anchors at the start, so inputs such as
            # '1.2.3-beta' were accepted; fullmatch requires the entire
            # string to be exactly X.X.X.
            if re.fullmatch(r'\d+\.\d+\.\d+', user_desired_version):
                self.yml_data['fromversion'] = user_desired_version
                is_input_version_valid = True
            else:
                print_error('Version format is not valid')
def create_incident_with_playbook(self, incident_name, playbook_id):
    # type: (str, str) -> int
    """Create an incident in Demisto with the given incident_name and the given playbook_id

    Args:
        incident_name (str): The name of the incident
        playbook_id (str): The id of the playbook

    Raises:
        ApiException: if the client has failed to create an incident

    Returns:
        int. The new incident's ID.
    """
    request = demisto_client.demisto_api.CreateIncidentRequest()
    request.create_investigation = True
    request.playbook_id = playbook_id
    request.name = incident_name
    try:
        response = self.demisto_client.create_incident(
            create_incident_request=request)
    except ApiException as api_err:
        print_error(
            f'Failed to create incident with playbook id : "{playbook_id}", '
            'possible reasons are:\n'
            '1. This playbook name does not exist \n'
            '2. Schema problems in the playbook \n'
            '3. Unauthorized api key')
        raise api_err
    print_color(
        f'The playbook: {self.playbook_id} was triggered successfully.',
        LOG_COLORS.GREEN)
    return response.id
def run(self):
    """Validate the files' structure; return 0 when valid, 1 otherwise."""
    print_color('Starting validating files structure', LOG_COLORS.GREEN)
    if not self.is_valid_structure():
        print_color(
            'The files were found as invalid, the exact error message can be located above',
            LOG_COLORS.RED)
        return 1
    print_color('The files are valid', LOG_COLORS.GREEN)
    return 0
def initiate_file_validator(self, validator_type, scheme_type):
    """Run structure + content validation on the output file.

    Returns 0 when both validators pass, 1 otherwise.
    """
    print_color('Starting validating files structure', LOG_COLORS.GREEN)
    structure = StructureValidator(file_path=str(self.output_file_name),
                                   predefined_scheme=scheme_type)
    validator = validator_type(structure)
    # Both the generic structure check and the type-specific check must pass.
    valid = structure.is_valid_file() and validator.is_valid_file(
        validate_rn=False)
    if not valid:
        print_color('The files are invalid', LOG_COLORS.RED)
        return 1
    print_color('The files are valid', LOG_COLORS.GREEN)
    return 0
def run(self):
    """ This function will try to load integration/script yml file.
    Creates test playbook, and converts each command to automation task in
    test playbook and generates verify outputs task from command outputs.

    All the tasks eventually will be linked to each other:
    playbook_start_task => delete_context(all) => task1 =>
    verify_outputs_task1 => task2 => verify_outputs_task2 => task_end

    At the end the functions dumps the new test playbook to the outdir if
    set, otherwise file will be created in local directory
    """
    if self.outdir:
        if not os.path.isdir(self.outdir):
            print_error(f'Directory not exist: {self.outdir}')
            return
    ryaml = YAML()
    ryaml.preserve_quotes = True
    try:
        with open(self.integration_yml_path, 'r') as yf:
            yaml_obj = ryaml.load(yf)
            yaml_obj.get('name')
    except FileNotFoundError as ex:
        if self.verbose:
            raise
        print_error(str(ex))
        return
    except AttributeError:
        print_error(f'Error - failed to parse: {self.integration_yml_path}.\nProbably invalid yml file')
        return
    test_playbook = Playbook(
        name=self.name,
        fromversion='4.5.0'
    )
    if self.file_type == ContentItemType.INTEGRATION:
        # One automation + verify-outputs task pair per integration command.
        for command in yaml_obj.get('script').get('commands'):
            create_automation_task_and_verify_outputs_task(
                test_playbook=test_playbook,
                command=command,
                item_type=ContentItemType.INTEGRATION,
                no_outputs=self.no_outputs
            )
    elif self.file_type == ContentItemType.SCRIPT:
        create_automation_task_and_verify_outputs_task(
            test_playbook=test_playbook,
            command=yaml_obj,
            # BUG FIX: the script branch previously passed
            # ContentItemType.INTEGRATION as the item type.
            item_type=ContentItemType.SCRIPT,
            no_outputs=self.no_outputs
        )
    test_playbook.add_task(create_end_task(test_playbook.task_counter))
    with open(self.test_playbook_yml_path, 'w') as yf:
        ryaml.dump(test_playbook.to_dict(), yf)
    print_color(f'Test playbook yml was saved at:\n{self.test_playbook_yml_path}', LOG_COLORS.GREEN)
def extract_to_package_format(self) -> int:
    """Extracts the self.yml_path into several files according to the Demisto standard of the package format.

    Returns:
        int. status code for the operation.
    """
    print("Starting migration of: {} to dir: {}".format(self.yml_path, self.dest_path))
    arg_path = self.dest_path
    output_path = os.path.abspath(self.dest_path)
    os.makedirs(output_path, exist_ok=True)
    base_name = os.path.basename(output_path)
    yml_type = self.get_yml_type()
    # Split the unified yml into code / image / description files.
    code_file = "{}/{}.py".format(output_path, base_name)
    self.extract_code(code_file)
    self.extract_image("{}/{}_image.png".format(output_path, base_name))
    self.extract_long_description("{}/{}_description.md".format(output_path, base_name))
    yaml_out = "{}/{}.yml".format(output_path, base_name)
    print("Creating yml file: {} ...".format(yaml_out))
    # ruamel round-trip loader preserves quoting/formatting of the source yml.
    ryaml = YAML()
    ryaml.preserve_quotes = True
    with open(self.yml_path, 'r') as yf:
        yaml_obj = ryaml.load(yf)
    script_obj = yaml_obj
    if yml_type == INTEGRATION:
        # Integrations nest the script under 'script'; drop the now-external
        # image/description blobs from the yml.
        script_obj = yaml_obj['script']
        del yaml_obj['image']
        if 'detaileddescription' in yaml_obj:
            del yaml_obj['detaileddescription']
    if script_obj['type'] != 'python':
        print('Script is not of type "python". Found type: {}. Nothing to do.'.format(script_obj['type']))
        return 1
    # Code was extracted to its own file; blank it out in the yml.
    script_obj['script'] = SingleQuotedScalarString('')
    with open(yaml_out, 'w') as yf:
        ryaml.dump(yaml_obj, yf)
    print("Running autopep8 on file: {} ...".format(code_file))
    try:
        subprocess.call(["autopep8", "-i", "--max-line-length", "130", code_file])
    except FileNotFoundError:
        print_color("autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
                    "Make sure to install it with: pip install autopep8.\n"
                    "Then run: autopep8 -i {}".format(code_file), LOG_COLORS.YELLOW)
    print("Detecting python version and setting up pipenv files ...")
    docker = get_docker_images(script_obj)[0]
    py_ver = get_python_version(docker, self.config.log_verbose)
    pip_env_dir = get_pipenv_dir(py_ver, self.config.envs_dirs_base)
    print("Copying pipenv files from: {}".format(pip_env_dir))
    shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
    shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
    try:
        subprocess.call(["pipenv", "install", "--dev"], cwd=output_path)
        print("Installing all py requirements from docker: [{}] into pipenv".format(docker))
        # Mirror the docker image's installed packages into the local pipenv.
        requirements = subprocess.check_output(["docker", "run", "--rm", docker,
                                                "pip", "freeze", "--disable-pip-version-check"],
                                               universal_newlines=True,
                                               stderr=subprocess.DEVNULL).strip()
        fp = tempfile.NamedTemporaryFile(delete=False)
        fp.write(requirements.encode('utf-8'))
        fp.close()
        try:
            subprocess.check_call(["pipenv", "install", "-r", fp.name], cwd=output_path)
        except Exception:
            # best-effort: the user can re-run the install manually
            print_color("Failed installing requirements in pipenv.\n "
                        "Please try installing manually after extract ends\n", LOG_COLORS.RED)
        os.unlink(fp.name)
        print("Installing flake8 for linting")
        subprocess.call(["pipenv", "install", "--dev", "flake8"], cwd=output_path)
    except FileNotFoundError:
        print_color("pipenv install skipped! It doesn't seem you have pipenv installed.\n"
                    "Make sure to install it with: pip3 install pipenv.\n"
                    "Then run in the package dir: pipenv install --dev", LOG_COLORS.YELLOW)
    # check if there is a changelog
    yml_changelog = os.path.splitext(self.yml_path)[0] + '_CHANGELOG.md'
    changelog = arg_path + '/CHANGELOG.md'
    if os.path.exists(yml_changelog):
        shutil.copy(yml_changelog, changelog)
    else:
        with open(changelog, 'wt', encoding='utf-8') as changelog_file:
            changelog_file.write("## [Unreleased]\n-\n")
    print_color("\nCompleted: setting up package: {}\n".format(arg_path), LOG_COLORS.GREEN)
    print("Next steps: \n",
          "* Install additional py packages for unit testing (if needed): cd {}; pipenv install <package>\n".format(
              arg_path),
          "* Create unit tests\n",
          "* Check linting and unit tests by running: ./Tests/scripts/pkg_dev_test_tasks.py -d {}\n".format(
              arg_path),
          "* When ready rm from git the source yml and add the new package:\n",
          " git rm {}\n".format(self.yml_path),
          " git add {}\n".format(arg_path),
          sep='')
    return 0
def run_dev_packages(self) -> int:
    """Run the configured dev checks (flake8/mypy/bandit/pylint/tests) for this package.

    Returns 0 on success, 1 when a check failed, 2 on unrecoverable docker
    failure. NOTE(review): return semantics inferred from the code paths
    below - confirm against the caller.
    """
    return_code = 0
    # load yaml
    _, yml_path = get_yml_paths_in_dir(
        self.project_dir, Errors.no_yml_file(self.project_dir))
    if not yml_path:
        return 1
    print_v('Using yaml file: {}'.format(yml_path))
    with open(yml_path, 'r') as yml_file:
        yml_data = yaml.safe_load(yml_file)
    script_obj = yml_data
    # Integrations nest the script object under 'script'.
    if isinstance(script_obj.get('script'), dict):
        script_obj = script_obj.get('script')
    script_type = script_obj.get('type')
    if script_type != 'python':
        if script_type == 'powershell':
            # TODO powershell linting
            return 0
        print(
            'Script is not of type "python". Found type: {}. Nothing to do.'
            .format(script_type))
        return 0
    dockers = get_all_docker_images(script_obj)
    py_num = get_python_version(dockers[0], self.log_verbose)
    # Lock guards interleaved console output across parallel workers.
    self.lock.acquire()
    print_color(
        "============ Starting process for: {} ============\n".format(
            self.project_dir), LOG_COLORS.YELLOW)
    if self.lock.locked():
        self.lock.release()
    self._setup_dev_files(py_num)
    # Host-side linters, each contributing to the aggregate return code.
    if self.run_args['flake8']:
        result_val = self.run_flake8(py_num)
        if result_val:
            return_code = result_val
    if self.run_args['mypy']:
        result_val = self.run_mypy(py_num)
        if result_val:
            return_code = result_val
    if self.run_args['bandit']:
        result_val = self.run_bandit(py_num)
        if result_val:
            return_code = result_val
    # Docker-side checks; each image gets up to two attempts (see retry below).
    for docker in dockers:
        for try_num in (1, 2):
            print_v("Using docker image: {}".format(docker))
            py_num = get_python_version(docker, self.log_verbose)
            try:
                if self.run_args['tests'] or self.run_args['pylint']:
                    if py_num == 2.7:
                        requirements = self.requirements_2
                    else:
                        requirements = self.requirements_3
                    docker_image_created = self._docker_image_create(
                        docker, requirements)
                    output, status_code = self._docker_run(
                        docker_image_created)
                    self.lock.acquire()
                    print_color(
                        "\n========== Running tests/pylint for: {} ========="
                        .format(self.project_dir), LOG_COLORS.YELLOW)
                    if status_code == 1:
                        # Route failures through the except block below.
                        raise subprocess.CalledProcessError(*output)
                    else:
                        print(output)
                        print_color(
                            "============ Finished process for: {} "
                            "with docker: {} ============\n".format(
                                self.project_dir, docker), LOG_COLORS.GREEN)
                    if self.lock.locked():
                        self.lock.release()
                break  # all is good no need to retry
            except subprocess.CalledProcessError as ex:
                if ex.output:
                    print_color(
                        "=========================== ERROR IN {}==========================="
                        "\n{}\n".format(self.project_dir, ex.output),
                        LOG_COLORS.RED)
                else:
                    print_color(
                        "========= Test Failed on {}, Look at the error/s above ========\n"
                        .format(self.project_dir), LOG_COLORS.RED)
                    return_code = 1
                if not self.log_verbose:
                    sys.stderr.write(
                        "Need a more detailed log? try running with the -v options as so: \n{} -v\n\n"
                        .format(" ".join(sys.argv[:])))
                if self.lock.locked():
                    self.lock.release()
                # circle ci docker setup sometimes fails on
                # 'connection reset by peer' - retry once in that case only.
                if try_num > 1 or not ex.output or 'read: connection reset by peer' not in ex.output:
                    return 2
                else:
                    sys.stderr.write(
                        "Retrying as failure seems to be docker communication related...\n"
                    )
            finally:
                sys.stdout.flush()
                sys.stderr.flush()
    return return_code
def re_create_id_set():
    """Rebuild ./Tests/id_set.json from all content entities.

    Processes integrations, playbooks, scripts and test playbooks in
    parallel, sorts each category, writes the combined OrderedDict to disk
    and reports any duplicate IDs found.
    """
    start_time = time.time()
    scripts_list = []
    playbooks_list = []
    integration_list = []
    testplaybooks_list = []
    # BUG FIX: the pool was never closed/joined, leaking worker processes.
    # The context manager guarantees cleanup once all maps complete.
    with Pool(processes=cpu_count() * 2) as pool:
        print_color("Starting the creation of the id_set", LOG_COLORS.GREEN)
        print_color("Starting iterating over Integrations", LOG_COLORS.GREEN)
        for arr in pool.map(process_integration, get_integrations_paths()):
            integration_list.extend(arr)
        print_color("Starting iterating over Playbooks", LOG_COLORS.GREEN)
        for arr in pool.map(process_playbook, get_playbooks_paths()):
            playbooks_list.extend(arr)
        print_color("Starting iterating over Scripts", LOG_COLORS.GREEN)
        for arr in pool.map(process_script, get_scripts_paths()):
            scripts_list.extend(arr)
        print_color("Starting iterating over TestPlaybooks", LOG_COLORS.GREEN)
        for pair in pool.map(process_test_playbook_path,
                             get_test_playbooks_paths()):
            if pair[0]:
                testplaybooks_list.append(pair[0])
            if pair[1]:
                scripts_list.append(pair[1])
    new_ids_dict = OrderedDict()
    # we sort each time the whole set in case someone manually changed something
    # it shouldn't take too much time
    new_ids_dict['scripts'] = sort(scripts_list)
    new_ids_dict['playbooks'] = sort(playbooks_list)
    new_ids_dict['integrations'] = sort(integration_list)
    new_ids_dict['TestPlaybooks'] = sort(testplaybooks_list)
    with open('./Tests/id_set.json', 'w') as id_set_file:
        json.dump(new_ids_dict, id_set_file, indent=4)
    exec_time = time.time() - start_time
    print_color(
        "Finished the creation of the id_set. Total time: {} seconds".format(
            exec_time), LOG_COLORS.GREEN)
    duplicates = find_duplicates(new_ids_dict)
    if any(duplicates):
        print_error(
            'The following duplicates were found: {}'.format(duplicates))
def validate_added_files(self, added_files):  # noqa: C901
    """Validate the added files from your branch.

    In case we encounter an invalid file we set the self._is_valid param to False.

    Args:
        added_files (set): A set of the modified files in the current branch.
    """
    for file_path in added_files:
        print('Validating {}'.format(file_path))
        # Test playbooks are not validated.
        if re.match(TEST_PLAYBOOK_REGEX, file_path, re.IGNORECASE):
            continue
        # Generic structure check applies to every file type below.
        structure_validator = StructureValidator(file_path)
        if not structure_validator.is_valid_file():
            self._is_valid = False
        if self.validate_id_set:
            if not self.id_set_validator.is_file_valid_in_set(file_path):
                self._is_valid = False
            if self.id_set_validator.is_file_has_used_id(file_path):
                self._is_valid = False
        # Dispatch the type-specific validator by file-path pattern.
        elif re.match(PLAYBOOK_REGEX, file_path, re.IGNORECASE):
            playbook_validator = PlaybookValidator(structure_validator)
            if not playbook_validator.is_valid_playbook():
                self._is_valid = False
        elif checked_type(file_path, YML_INTEGRATION_REGEXES):
            image_validator = ImageValidator(file_path)
            if not image_validator.is_valid():
                self._is_valid = False
            description_validator = DescriptionValidator(file_path)
            if not description_validator.is_valid():
                self._is_valid = False
            integration_validator = IntegrationValidator(
                structure_validator)
            if not integration_validator.is_valid_file(validate_rn=False):
                self._is_valid = False
        elif checked_type(file_path, PACKAGE_SCRIPTS_REGEXES):
            unifier = Unifier(os.path.dirname(file_path))
            yml_path, _ = unifier.get_script_package_data()
            # Set file path to the yml file
            structure_validator.file_path = yml_path
            script_validator = ScriptValidator(structure_validator)
            if not script_validator.is_valid_file(validate_rn=False):
                self._is_valid = False
        elif re.match(BETA_INTEGRATION_REGEX, file_path, re.IGNORECASE) or \
                re.match(BETA_INTEGRATION_YML_REGEX, file_path, re.IGNORECASE):
            description_validator = DescriptionValidator(file_path)
            if not description_validator.is_valid_beta_description():
                self._is_valid = False
            integration_validator = IntegrationValidator(
                structure_validator)
            if not integration_validator.is_valid_beta_integration():
                self._is_valid = False
        elif re.match(IMAGE_REGEX, file_path, re.IGNORECASE):
            image_validator = ImageValidator(file_path)
            if not image_validator.is_valid():
                self._is_valid = False
        # incident fields and indicator fields are using the same scheme.
        elif checked_type(file_path, JSON_INDICATOR_AND_INCIDENT_FIELDS):
            incident_field_validator = IncidentFieldValidator(
                structure_validator)
            if not incident_field_validator.is_valid_file():
                self._is_valid = False
        elif checked_type(file_path, JSON_ALL_LAYOUT_REGEXES):
            layout_validator = LayoutValidator(structure_validator)
            if not layout_validator.is_valid_layout():
                self._is_valid = False
        elif 'CHANGELOG' in file_path:
            self.is_valid_release_notes(file_path)
        elif checked_type(file_path, [REPUTATION_REGEX]):
            print_color(
                F'Skipping validation for file {file_path} since no validation is currently defined.',
                LOG_COLORS.YELLOW)
        elif checked_type(file_path, CHECKED_TYPES_REGEXES):
            # Known type with no extra validation beyond structure.
            pass
        else:
            print_error(
                "The file type of {} is not supported in validate command".
                format(file_path))
            print_error(
                "validate command supports: Integrations, Scripts, Playbooks, "
                "Incident fields, Indicator fields, Images, Release notes, Layouts and Descriptions"
            )
            self._is_valid = False