def run_bandit(self, py_num) -> int:
    """Run the bandit security linter on the package's lint files.

    Args:
        py_num: The python version in use (e.g. 2.7 or 3.7).

    Returns:
        int. 0 on successful bandit run, 1 otherwise.
    """
    lint_files = self._get_lint_files()
    python_exe = 'python2' if py_num < 3 else 'python3'
    output = run_command(' '.join(
        [python_exe, '-m', 'bandit', '-lll', '-iii', '-q', lint_files]),
        cwd=self.project_dir)
    # Serialize console output across worker threads. Using the lock as a
    # context manager guarantees release even if a print raises - the
    # original acquire()/locked()/release() dance could leave the lock
    # held on error and stall the other workers.
    with self.lock:
        print("========= Running bandit on: {} ===============".format(
            lint_files))
        print_v('Using: {} to run bandit'.format(python_exe))
        if len(output) == 0:
            # Empty output means bandit found no issues at this severity.
            print_color("bandit completed for: {}\n".format(lint_files),
                        LOG_COLORS.GREEN)
            return 0
        print_error(output)
        return 1
def run_flake8(self, py_num) -> int:
    """Runs flake8 on the package directory.

    Args:
        py_num (int): The python version in use

    Returns:
        int. 0 if flake8 is successful, 1 otherwise.
    """
    lint_files = self._get_lint_files()
    python_exe = 'python2' if py_num < 3 else 'python3'
    print_v('Using: {} to run flake8'.format(python_exe))
    output = run_command(f'{python_exe} -m flake8 {self.project_dir}',
                         cwd=self.configuration.env_dir)
    # `with` guarantees the shared print-lock is released even if a print
    # raises (the manual acquire()/locked()/release() pattern could leave
    # the lock held on error and deadlock the remaining workers).
    with self.lock:
        print("\n========= Running flake8 on: {}===============".format(
            lint_files))
        if len(output) == 0:
            print_color("flake8 completed for: {}\n".format(lint_files),
                        LOG_COLORS.GREEN)
            return 0
        print_error(output)
        return 1
def copy_demistotmock(self):
    """copy demistomock from content"""
    if not self.demisto_mock:
        return
    mock_source = f'{self.configuration.env_dir}/Tests/demistomock/demistomock.py'
    try:
        shutil.copy(mock_source, self.full_output_path)
    except Exception as err:
        # Best-effort copy: the package is still usable without the mock.
        print_v(f'Could not copy demistomock: {str(err)}')
def check_api_module_imports(self, py_num):
    """
    Checks if the integration imports an API module and if so pastes the module in the package.

    :param py_num: The python version - api modules are in python 3
    """
    if py_num > 3:
        unifier = Unifier(self.project_dir)
        code_file_path = unifier.get_code_file('.py')
        # Defined before the try so the except-clause message below cannot
        # raise a NameError when get_code_file/open fails before the unpack
        # (the original referenced `module_name` unconditionally there).
        module_name = ''
        try:
            # Look for an import to an API module in the code. If there is such import, we need to copy the correct
            # module file to the package directory.
            with io.open(code_file_path, mode='r', encoding='utf-8') as script_file:
                _, module_name = unifier.check_api_module_imports(
                    script_file.read())
            if module_name:
                module_path = os.path.join(self.configuration.env_dir,
                                           'Packs', 'ApiModules', 'Scripts',
                                           module_name, module_name + '.py')
                print_v('Copying ' + os.path.join(
                    self.configuration.env_dir, 'Scripts', module_path))
                if not os.path.exists(module_path):
                    raise ValueError(
                        'API Module {} not found, you might be outside of the content repository'
                        ' or this API module does not exist'.format(
                            module_name))
                shutil.copy(os.path.join(module_path), self.project_dir)
        except Exception as e:
            print_v('Unable to retrieve the module file {}: {}'.format(
                module_name, str(e)))
def incident_type_uploader(self, path: str):
    """Upload an incident-type JSON file to Cortex XSOAR.

    The incident-type object is wrapped in a list (the format the server
    import endpoint expects), written to a temp file next to the source
    file, uploaded, and the temp file is removed afterwards.

    Args:
        path (str): path to the incident type JSON file.
    """
    file_name = os.path.basename(path)
    # Defined before the try so `finally` cannot hit a NameError when an
    # exception fires before the temp file is created (the original used
    # `new_file_path` unconditionally in the finally block).
    new_file_path = ''
    try:
        # Wrap the incident object with a list to be compatible with Cortex XSOAR
        incident_types_unified_data = [get_json(path)]

        # Create a temp file object
        incidents_unified_file = NamedTemporaryFile(
            dir=f'{os.path.dirname(path)}', suffix='.json', delete=False)
        incidents_unified_file.write(
            bytes(json.dumps(incident_types_unified_data), 'utf-8'))
        new_file_path = incidents_unified_file.name
        incidents_unified_file.close()

        # Upload the file to Cortex XSOAR
        result = self.client.import_incident_types_handler(
            file=new_file_path)

        # Print results
        print_v(f'Result:\n{result.to_str()}', self.log_verbose)
        print_color(
            f'Uploaded incident type - \'{os.path.basename(path)}\': successfully',
            LOG_COLORS.GREEN)
        self.successfully_uploaded_files.append(
            (file_name, 'Incident Type'))
    except Exception as err:
        self._parse_error_response(err, 'incident type', file_name)
        self.failed_uploaded_files.append((file_name, 'Incident Type'))
        self.status_code = 1
    finally:
        # Only clean up if the temp file was actually created.
        if new_file_path:
            self._remove_temp_file(new_file_path)
def _get_packages(self, content_repo: git.Repo, input: str, git: bool,
                  all_packs: bool) -> List[Path]:
    """ Get packages paths to run lint command.

    Args:
        content_repo(git.Repo): Content repository object.
        input(str): dir pack specified as argument.
        git(bool): Perform lint and test only on changed packs.
        all_packs(bool): Whether to run on all packages.

    Returns:
        List[Path]: Pkgs to run lint
    """
    packages: list
    if all_packs or git:
        # Repo-wide run: collect every package under the content dir.
        packages = LintManager._get_all_packages(
            content_dir=content_repo.working_dir)
    elif input:
        # Explicit comma-separated list of package dirs.
        packages = [Path(raw_item) for raw_item in input.split(',')]
    else:
        # No flags and no input - lint the current working directory.
        packages = [Path().cwd()]
    total_found = len(packages)
    if git:
        packages = LintManager._filter_changed_packages(
            content_repo=content_repo, pkgs=packages)
        for pkg in packages:
            print_v(
                f"Found changed package {Colors.Fg.cyan}{pkg}{Colors.reset}",
                log_verbose=self._verbose)
    print(
        f"Execute lint and test on {Colors.Fg.cyan}{len(packages)}/{total_found}{Colors.reset} packages"
    )
    return packages
def _get_playground_id(self):
    """Retrieves Playground ID from the remote Demisto instance.
    """
    answer = self.client.search_investigations(
        filter={'filter': {'type': [9]}})
    if answer.total == 0:
        raise RuntimeError(
            'No playgrounds were detected in the environment.')
    candidates = answer.data
    if len(candidates) > 1:
        # if found more than one playground, try to filter to results against the current user
        user_data, response, _ = self.client.generic_request(
            path='/user',
            method='GET',
            content_type='application/json',
            response_type=object)
        if response != 200:
            raise RuntimeError('Cannot find username')
        username = user_data.get('username')
        candidates = [
            playground for playground in candidates
            if playground.creating_user_id == username
        ]
    if len(candidates) != 1:
        raise RuntimeError(
            f'There is more than one playground to the user. '
            f'Number of playgrounds is: {len(candidates)}')
    playground_id = candidates[0].id
    print_v(f'Playground ID: {playground_id}', self.log_verbose)
    return playground_id
def copy_common_server_python(self):
    """copy commonserverpython from the base pack"""
    if not self.common_server:
        return
    try:
        source_path = get_common_server_path(self.configuration.env_dir)
        shutil.copy(source_path, self.full_output_path)
    except Exception as err:
        # Best-effort copy; failure is reported only in verbose mode.
        print_v(f'Could not copy CommonServerPython: {str(err)}')
def integration_uploader(self, path: str):
    """Upload an integration (a unified yml file or a package dir) to Cortex XSOAR.

    When `path` is a package directory it is first unified into a temporary
    yml file (plus an optional second yml for Demisto 4.5 docker images);
    the temp files are removed again in the `finally` block.
    """
    is_dir = False
    file_name = os.path.basename(path)
    docker45_path = ''
    try:
        if os.path.isdir(path):
            # Create a temporary unified yml file
            try:
                is_dir = True
                unifier = Unifier(input=path, output=path)
                unified_paths = unifier.merge_script_package_to_yml()
                # NOTE: `path` is re-bound to the unified yml here, so both
                # the upload below and the cleanup in `finally` operate on
                # the temp file, not the original package directory.
                path = unified_paths[0]
                docker45_path = unified_paths[1] if len(
                    unified_paths) > 1 else ''
                file_name = os.path.basename(path)
            except IndexError:
                # merge_script_package_to_yml returned no paths - the dir
                # does not look like a valid integration package.
                print_error(
                    f'Error uploading integration from pack. /'
                    f'Check that the given integration path contains a valid integration: {path}.'
                )
                self.status_code = 1
                self.failed_uploaded_files.append(
                    (file_name, 'Integration'))
                return
            except Exception as err:
                print_error(str('Upload integration failed\n'))
                print_error(str(err))
                self.failed_uploaded_files.append(
                    (file_name, 'Integration'))
                self.status_code = 1
                return

        # Upload the file to Cortex XSOAR
        result = self.client.integration_upload(file=path)

        # Print results
        print_v(f'Result:\n{result.to_str()}', self.log_verbose)
        print_color(
            f'Uploaded integration - \'{file_name}\': successfully',
            LOG_COLORS.GREEN)
        self.successfully_uploaded_files.append((file_name, 'Integration'))
    except Exception as err:
        self._parse_error_response(err, 'integration', file_name)
        self.failed_uploaded_files.append((file_name, 'Integration'))
        self.status_code = 1
    finally:
        # Remove the temporary file
        if is_dir:
            self._remove_temp_file(path)
            if docker45_path:
                self._remove_temp_file(docker45_path)
def _docker_run(self, docker_image): workdir = '/devwork' # this is setup in CONTAINER_SETUP_SCRIPT lint_files = os.path.basename(self._get_lint_files()) run_params = [ 'docker', 'create', '-w', workdir, '-e', 'PYLINT_FILES={}'.format(lint_files) ] run_params.extend(['-e', f'PS_LINT_FILES={lint_files}']) if not self.root: run_params.extend(['-u', '{}:4000'.format(os.getuid())]) if not self.run_args['tests']: run_params.extend(['-e', 'PYTEST_SKIP=1']) run_params.extend(['-e', 'PS_TEST_SKIP=1']) if not self.run_args['pylint']: run_params.extend(['-e', 'PYLINT_SKIP=1']) if not self.run_args['pslint']: run_params.extend(['-e', 'PS_LINT_SKIP=1']) run_params.extend(['-e', 'CPU_NUM={}'.format(self.cpu_num)]) run_params.extend(['-e', 'CI={}'.format(os.getenv("CI", "false"))]) run_script_name = self.run_dev_tasks_script_name if self.script_type == TYPE_PYTHON else self.run_dev_tasks_script_pwsh_name run_script = self.run_dev_tasks_script if self.script_type == TYPE_PYTHON else self.run_dev_tasks_script_pwsh run_params.extend([docker_image, 'sh', './{}'.format(run_script_name)]) print_v(f'container create: {run_params}') output = subprocess.check_output(run_params, stderr=subprocess.STDOUT, universal_newlines=True) container_id = output.strip() try: output = output + '\n' + subprocess.check_output( [ 'docker', 'cp', self.project_dir + '/.', container_id + ':' + workdir ], stderr=subprocess.STDOUT, universal_newlines=True) output = output + '\n' + subprocess.check_output( ['docker', 'cp', run_script, container_id + ':' + workdir], universal_newlines=True, stderr=subprocess.STDOUT) output = output + '\n' + subprocess.check_output( ['docker', 'start', '-a', container_id], stderr=subprocess.STDOUT, universal_newlines=True) return output, 0 finally: if not self.keep_container: run_command(f'docker rm {container_id}') else: print("Test container [{}] was left available".format( container_id))
def _get_playground_id(self):
    """Retrieves Playground ID from the remote Demisto instance.
    """
    answer = self.client.search_investigations(
        filter={'filter': {'type': [9]}})
    # Exactly one playground is expected; anything else is an error.
    if answer.total != 1:
        raise RuntimeError(
            f'Got unexpected amount of results in getPlaygroundInvestigationID. '
            f'Response was: {answer.total}')
    playground_id = answer.data[0].id
    print_v(f'Playground ID: {playground_id}', self.log_verbose)
    return playground_id
def classifier_uploader(self, path: str):
    """Upload a classifier file to Cortex XSOAR, recording success/failure."""
    file_name = os.path.basename(path)
    try:
        # Push the classifier file to the Cortex XSOAR instance.
        upload_result = self.client.import_classifier(file=path)
        print_v(f'Result:\n{upload_result.to_str()}', self.log_verbose)
        print_color(
            f'Uploaded classifier - \'{os.path.basename(path)}\': successfully',
            LOG_COLORS.GREEN)
        self.successfully_uploaded_files.append((file_name, 'Classifier'))
    except Exception as err:
        self._parse_error_response(err, 'classifier', file_name)
        self.failed_uploaded_files.append((file_name, 'Classifier'))
        self.status_code = 1
def _get_packages(self, content_repo: git.Repo, input: str, git: bool,
                  all_packs: bool, base_branch: str) -> List[Path]:
    """ Get packages paths to run lint command.

    Args:
        content_repo(git.Repo): Content repository object.
        input(str): dir pack specified as argument.
        git(bool): Perform lint and test only on changed packs.
        all_packs(bool): Whether to run on all packages.
        base_branch (str): Name of the branch to run the diff on.

    Returns:
        List[Path]: Pkgs to run lint
    """
    packages: list
    if all_packs or git:
        # Repo-wide run: collect every package under the content dir.
        packages = LintManager._get_all_packages(
            content_dir=content_repo.working_dir)
    elif input:
        packages = []
        for raw_item in input.split(','):
            has_pack_meta = os.path.isdir(raw_item) and os.path.exists(
                os.path.join(raw_item, PACKS_PACK_META_FILE_NAME))
            if has_pack_meta:
                # A whole pack was given - expand to all of its packages.
                packages.extend(
                    LintManager._get_all_packages(content_dir=raw_item))
            else:
                packages.append(Path(raw_item))
    else:
        # No flags and no input - lint the current working directory.
        packages = [Path().cwd()]
    total_found = len(packages)
    if git:
        packages = self._filter_changed_packages(content_repo=content_repo,
                                                 pkgs=packages,
                                                 base_branch=base_branch)
        for pkg in packages:
            print_v(
                f"Found changed package {Colors.Fg.cyan}{pkg}{Colors.reset}",
                log_verbose=self._verbose)
    print(
        f"Execute lint and test on {Colors.Fg.cyan}{len(packages)}/{total_found}{Colors.reset} packages"
    )
    return packages
def generate_integration_context(input_path: str,
                                 examples: Optional[str] = None,
                                 insecure: bool = False,
                                 verbose: bool = False,
                                 output_path: Optional[str] = None):
    """ Generate integration command contexts in-place.

    Args:
        output_path: Output path
        input_path: path to the yaml integration.
        examples: path to the command examples.
        insecure: should use insecure.
        verbose: verbose (debug mode).
    """
    # Default to writing the updated contexts back over the input yml.
    if not output_path:
        output_path = input_path
    try:
        yml_data = get_yaml(input_path)

        # Parse examples file
        example_dict = generate_example_dict(examples, insecure)

        for command, command_examples in example_dict.items():
            print_v(f'Building context for the {command} command...',
                    verbose)
            # Generate the examples with a local server
            for _, _, outputs in command_examples:
                parsed_outputs = dict_from_outputs_str(command,
                                                       outputs,
                                                       verbose=verbose)
                yml_data = insert_outputs(yml_data, command,
                                          parsed_outputs.get('outputs'))

        # Make the changes in place the input yml
        print_success(f'Writing outputs to {output_path}')
        write_yml(output_path, yml_data)
    except ValueError as ex:
        if verbose:
            raise
        print_error(f'Error: {str(ex)}')
        return 1
    return 0
def _docker_login(self):
    """Log in to dockerhub once, using DOCKERHUB_USER/DOCKERHUB_PASSWORD.

    Returns True when already or newly logged in, False otherwise.
    """
    if self.docker_login_completed:
        return True
    user = os.getenv('DOCKERHUB_USER', None)
    if not user:
        print_v('DOCKERHUB_USER not set. Not trying to login to dockerhub')
        return False
    password = os.getenv('DOCKERHUB_PASSWORD', None)
    # pass is optional for local testing scenario. allowing password to be passed via stdin
    login_cmd = ['docker', 'login', '-u', user]
    if password:
        login_cmd.append('--password-stdin')
    completed = subprocess.run(login_cmd,
                               input=password,
                               capture_output=True,
                               text=True)
    if completed.returncode != 0:
        print("Failed docker login: {}".format(completed.stderr))
        return False
    print_v("Completed docker login")
    self.docker_login_completed = True
    return True
def _setup_dev_files_py(self, py_num):
    # copy demistomock and common server
    try:
        env_dir = self.configuration.env_dir
        shutil.copy(env_dir + '/Tests/demistomock/demistomock.py',
                    self.project_dir)
        # create empty file
        open(self.project_dir + '/CommonServerUserPython.py', 'a').close()
        shutil.rmtree(self.project_dir + '/__pycache__', ignore_errors=True)
        shutil.copy(env_dir + '/Tests/scripts/dev_envs/pytest/conftest.py',
                    self.project_dir)
        self.check_api_module_imports(py_num)
        # Otherwise we already have the CommonServerPython.py file
        if "/Scripts/CommonServerPython" not in self.project_dir:
            shutil.copy(get_common_server_path(env_dir), self.project_dir)
    except Exception as e:
        print_v(
            'Could not copy demistomock and CommonServer files: {}'.format(
                str(e)))
def run_vulture(self, py_num) -> int:
    """Run vulture (dead-code detection)

    Args:
        py_num: The python version in use

    Returns:
        int. 0 on successful vulture run, 1 otherwise.
    """
    lint_files = self._get_lint_files()
    python_exe = 'python2' if py_num < 3 else 'python3'
    cmd_args = [
        python_exe, '-m', 'vulture', lint_files, '--min-confidence',
        os.environ.get('VULTURE_MIN_CONFIDENCE_LEVEL', '100')
    ]
    vulture_whitelist_path = os.path.join(self.project_dir,
                                          '.vulture_whitelist.py')
    if os.path.isfile(vulture_whitelist_path):
        # Insert the whitelist as an extra source argument, right after the
        # lint files and before the --min-confidence flag.
        cmd_args.insert(4, vulture_whitelist_path)
    output = run_command(' '.join(cmd_args), cwd=self.project_dir)
    # Context manager guarantees the shared print-lock is always released,
    # unlike the manual acquire()/locked()/release() sequence which could
    # leak the lock if a print raised.
    with self.lock:
        print("========= Running vulture on: {} ===============".format(
            lint_files))
        print_v('Using: {} to run vulture'.format(python_exe))
        if len(output) == 0:
            print_color("vulture completed for: {}\n".format(lint_files),
                        LOG_COLORS.GREEN)
            return 0
        print_error(output)
        return 1
def run_dev_packages(self) -> int:
    """Run all configured dev tasks for a python package.

    Loads the package yml, runs the host-side linters (flake8/mypy/bandit)
    and then, per docker image, builds a dev image and runs tests/pylint
    inside it (with one retry on docker-communication failures).

    Returns:
        int. 0 on success, 1 when a lint/test failed, 2 on an
        unrecoverable docker error.
    """
    return_code = 0
    # load yaml
    _, yml_path = get_yml_paths_in_dir(
        self.project_dir, Errors.no_yml_file(self.project_dir))
    if not yml_path:
        return 1
    print_v('Using yaml file: {}'.format(yml_path))
    with open(yml_path, 'r') as yml_file:
        yml_data = yaml.safe_load(yml_file)
    script_obj = yml_data
    # Integrations nest the script object under a 'script' key.
    if isinstance(script_obj.get('script'), dict):
        script_obj = script_obj.get('script')
    script_type = script_obj.get('type')
    if script_type != 'python':
        if script_type == 'powershell':
            # TODO powershell linting
            return 0
        print(
            'Script is not of type "python". Found type: {}. Nothing to do.'
            .format(script_type))
        return 0

    dockers = get_all_docker_images(script_obj)
    py_num = get_python_version(dockers[0], self.log_verbose)
    self.lock.acquire()
    print_color(
        "============ Starting process for: {} ============\n".format(
            self.project_dir), LOG_COLORS.YELLOW)
    if self.lock.locked():
        self.lock.release()
    self._setup_dev_files(py_num)
    # Host-side linters; remember any failure but keep going.
    if self.run_args['flake8']:
        result_val = self.run_flake8(py_num)
        if result_val:
            return_code = result_val
    if self.run_args['mypy']:
        result_val = self.run_mypy(py_num)
        if result_val:
            return_code = result_val
    if self.run_args['bandit']:
        result_val = self.run_bandit(py_num)
        if result_val:
            return_code = result_val
    for docker in dockers:
        for try_num in (1, 2):
            print_v("Using docker image: {}".format(docker))
            py_num = get_python_version(docker, self.log_verbose)
            try:
                if self.run_args['tests'] or self.run_args['pylint']:
                    # Pick the requirements doc matching the python major
                    # version of this docker image.
                    if py_num == 2.7:
                        requirements = self.requirements_2
                    else:
                        requirements = self.requirements_3
                    docker_image_created = self._docker_image_create(
                        docker, requirements)
                    output, status_code = self._docker_run(
                        docker_image_created)
                    self.lock.acquire()
                    print_color(
                        "\n========== Running tests/pylint for: {} ========="
                        .format(self.project_dir), LOG_COLORS.YELLOW)
                    if status_code == 1:
                        raise subprocess.CalledProcessError(*output)
                    else:
                        print(output)
                        print_color(
                            "============ Finished process for: {} "
                            "with docker: {} ============\n".format(
                                self.project_dir, docker), LOG_COLORS.GREEN)
                    if self.lock.locked():
                        self.lock.release()
                break  # all is good no need to retry
            except subprocess.CalledProcessError as ex:
                if ex.output:
                    print_color(
                        "=========================== ERROR IN {}==========================="
                        "\n{}\n".format(self.project_dir, ex.output),
                        LOG_COLORS.RED)
                else:
                    print_color(
                        "========= Test Failed on {}, Look at the error/s above ========\n"
                        .format(self.project_dir), LOG_COLORS.RED)
                return_code = 1
                if not self.log_verbose:
                    sys.stderr.write(
                        "Need a more detailed log? try running with the -v options as so: \n{} -v\n\n"
                        .format(" ".join(sys.argv[:])))
                if self.lock.locked():
                    self.lock.release()
                # circle ci docker setup sometimes fails on
                if try_num > 1 or not ex.output or 'read: connection reset by peer' not in ex.output:
                    return 2
                else:
                    sys.stderr.write(
                        "Retrying as failure seems to be docker communication related...\n"
                    )
            finally:
                sys.stdout.flush()
                sys.stderr.flush()
    return return_code
def run_dev_packages(self) -> int:
    """Run the configured dev tasks for a python or powershell package.

    For each of the package's docker images, builds a dev image and runs
    the tests/lint tasks inside it, retrying once on docker-communication
    failures.

    Returns:
        int. 0 on success, 1 when a lint/test failed, 2 on an
        unrecoverable docker error.
    """
    return_code = 0
    supported_types = (TYPE_PYTHON, TYPE_PWSH)
    if self.script_type not in supported_types:
        print_warning(
            f'Script is not of types: {supported_types}. Found type: {self.script_type}. Nothing to do.'
        )
        return 0

    dockers = get_all_docker_images(self.script_obj)
    print_color(
        "============ Starting process for: {} ============\n".format(
            self.project_dir), LOG_COLORS.YELLOW)
    # Host-side tasks differ per script type.
    if self.script_type == TYPE_PYTHON:
        return_code = self.run_py_non_docker_tasks(dockers)
    if self.script_type == TYPE_PWSH:
        self._setup_dev_files_pwsh()
    for docker in dockers:
        for try_num in (1, 2):
            print_v("Using docker image: {}".format(docker))
            try:
                if self.run_args['tests'] or self.run_args['pylint']:
                    docker_image_created = self._docker_image_create(docker)
                    output, status_code = self._docker_run(
                        docker_image_created)
                    with self.lock:
                        print_color(
                            "\n========== Running tests/pylint for: {} ========="
                            .format(self.project_dir), LOG_COLORS.YELLOW)
                        if status_code == 1:
                            raise subprocess.CalledProcessError(*output)
                        else:
                            print(output)
                            print_color(
                                "============ Finished process for: {} "
                                "with docker: {} ============\n".format(
                                    self.project_dir, docker),
                                LOG_COLORS.GREEN)
                break  # all is good no need to retry
            except subprocess.CalledProcessError as ex:
                with self.lock:
                    if ex.output:
                        print_color(
                            "=========================== ERROR IN {}==========================="
                            "\n{}\n".format(self.project_dir, ex.output),
                            LOG_COLORS.RED)
                    else:
                        print_color(
                            "========= Test Failed on {}, Look at the error/s above ========\n"
                            .format(self.project_dir), LOG_COLORS.RED)
                    return_code = 1
                    if not get_log_verbose():
                        sys.stderr.write(
                            "Need a more detailed log? try running with the -v options as so: \n{} -v\n\n"
                            .format(" ".join(sys.argv[:])))
                # circle ci docker setup sometimes fails on
                if try_num > 1 or not ex.output or 'read: connection reset by peer' not in ex.output:
                    return 2
                else:
                    sys.stderr.write(
                        "Retrying as failure seems to be docker communication related...\n"
                    )
            finally:
                sys.stdout.flush()
                sys.stderr.flush()
    return return_code
def report_unit_tests(self, lint_status: dict, pkgs_status: dict,
                      return_exit_code: int):
    """ Log failed unit-tests , if verbosity specified will log also success unit-tests

    Args:
        lint_status(dict): Overall lint status
        pkgs_status(dict): All pkgs status dict
        return_exit_code(int): exit code will indicate which lint or test failed
    """
    # Indentation config
    preferred_width = 100
    pack_indent = 2
    pack_prefix = " " * pack_indent + "- Package: "
    wrapper_pack = textwrap.TextWrapper(initial_indent=pack_prefix,
                                        width=preferred_width,
                                        subsequent_indent=' ' *
                                        len(pack_prefix))
    docker_indent = 6
    docker_prefix = " " * docker_indent + "- Image: "
    wrapper_docker_image = textwrap.TextWrapper(
        initial_indent=docker_prefix,
        width=preferred_width,
        subsequent_indent=' ' * len(docker_prefix))
    test_indent = 9
    test_prefix = " " * test_indent + "- "
    wrapper_test = textwrap.TextWrapper(initial_indent=test_prefix,
                                        width=preferred_width,
                                        subsequent_indent=' ' *
                                        len(test_prefix))
    error_indent = 9
    error_first_prefix = " " * error_indent + " Error: "
    error_sec_prefix = " " * error_indent + " "
    wrapper_first_error = textwrap.TextWrapper(
        initial_indent=error_first_prefix,
        width=preferred_width,
        subsequent_indent=' ' * len(error_first_prefix))
    wrapper_sec_error = textwrap.TextWrapper(
        initial_indent=error_sec_prefix,
        width=preferred_width,
        subsequent_indent=' ' * len(error_sec_prefix))

    # Log passed unit-tests
    headline_printed = False
    passed_printed = False
    for pkg, status in pkgs_status.items():
        if status.get("images"):
            if status.get("images")[0].get("pytest_json",
                                           {}).get("report",
                                                   {}).get("tests"):
                # Print the section headline once, and only when verbose
                # and there is also a pytest failure in the exit code.
                if (not headline_printed and self._verbose) and (
                        EXIT_CODES["pytest"] & return_exit_code):
                    # Log unit-tests
                    sentence = " Unit Tests "
                    print(f"\n{Colors.Fg.cyan}{'#' * len(sentence)}")
                    print(f"{sentence}")
                    print(f"{'#' * len(sentence)}{Colors.reset}")
                    headline_printed = True
                if not passed_printed:
                    print_v(
                        f"\n{Colors.Fg.green}Passed Unit-tests:{Colors.reset}",
                        log_verbose=self._verbose)
                    passed_printed = True
                print_v(wrapper_pack.fill(
                    f"{Colors.Fg.green}{pkg}{Colors.reset}"),
                        log_verbose=self._verbose)
                for image in status["images"]:
                    if not image.get("image_errors"):
                        tests = image.get("pytest_json",
                                          {}).get("report",
                                                  {}).get("tests")
                        if tests:
                            print_v(wrapper_docker_image.fill(
                                image['image']),
                                    log_verbose=self._verbose)
                            for test_case in tests:
                                # Anything not "failed" is reported as a
                                # passing test; parametrization suffixes
                                # like "[param]" are stripped from names.
                                if test_case.get(
                                        "call",
                                        {}).get("outcome") != "failed":
                                    name = re.sub(
                                        pattern=r"\[.*\]",
                                        repl="",
                                        string=test_case.get("name"))
                                    print_v(wrapper_test.fill(name),
                                            log_verbose=self._verbose)

    # Log failed unit-tests
    if EXIT_CODES["pytest"] & return_exit_code:
        if not headline_printed:
            # Log unit-tests
            sentence = " Unit Tests "
            print(f"\n{Colors.Fg.cyan}{'#' * len(sentence)}")
            print(f"{sentence}")
            print(f"{'#' * len(sentence)}{Colors.reset}")
        print(f"\n{Colors.Fg.red}Failed Unit-tests:{Colors.reset}")
        for fail_pack in lint_status["fail_packs_pytest"]:
            print(
                wrapper_pack.fill(
                    f"{Colors.Fg.red}{fail_pack}{Colors.reset}"))
            for image in pkgs_status[fail_pack]["images"]:
                tests = image.get("pytest_json",
                                  {}).get("report", {}).get("tests")
                if tests:
                    for test_case in tests:
                        if test_case.get("call",
                                         {}).get("outcome") == "failed":
                            name = re.sub(pattern=r"\[.*\]",
                                          repl="",
                                          string=test_case.get("name"))
                            print(wrapper_test.fill(name))
                            # longrepr is a list of traceback lines; the
                            # first gets the "Error:" prefix, the rest are
                            # continuation lines.
                            if test_case.get("call", {}).get("longrepr"):
                                print(
                                    wrapper_docker_image.fill(
                                        image['image']))
                                for i in range(
                                        len(
                                            test_case.get(
                                                "call",
                                                {}).get("longrepr"))):
                                    if i == 0:
                                        print(
                                            wrapper_first_error.fill(
                                                test_case.get(
                                                    "call", {}).get(
                                                        "longrepr")[i]))
                                    else:
                                        print(
                                            wrapper_sec_error.fill(
                                                test_case.get(
                                                    "call", {}).get(
                                                        "longrepr")[i]))
                                print('\n')
                else:
                    # No per-test data - fall back to image-level errors.
                    print(wrapper_docker_image.fill(image['image']))
                    errors = image.get("pytest_errors", {})
                    if errors:
                        print(wrapper_sec_error.fill(errors))
def _docker_image_create(self, docker_base_image):
    """Create the docker image with dev dependencies. Will check if already existing.
    Uses a hash of the requirements to determine the image tag

    Arguments:
        docker_base_image (string): docker image to use as base for installing dev deps

    Returns:
        string. image name to use
    """
    if ':' not in docker_base_image:
        docker_base_image += ':latest'
    # Choose the setup script (and, for python, the requirements fed on
    # stdin) according to the script type.
    setup_script = self.container_setup_script
    setup_script_name = self.container_setup_script_name
    if self.script_type == TYPE_PWSH:
        setup_script = self.container_setup_script_pwsh
        setup_script_name = self.container_setup_script_pwsh_name
        docker_input = None
    else:
        py_num = get_python_version(docker_base_image)
        if py_num == 2.7:
            requirements = self.requirements_2
        else:
            requirements = self.requirements_3
        docker_input = requirements
    with open(setup_script, "rb") as f:
        setup_script_data = f.read()
    # The tag hash covers everything that affects the image contents.
    if self.script_type == TYPE_PYTHON:
        md5 = hashlib.md5(
            requirements.encode('utf-8') + setup_script_data).hexdigest()
    else:
        md5 = hashlib.md5(setup_script_data).hexdigest()
    target_image = 'devtest' + docker_base_image + '-' + md5
    lock_file = ".lock-" + target_image.replace("/", "-")
    # Clear stale lock files (older than 5 minutes) from crashed runs.
    try:
        if (time.time() - os.path.getctime(lock_file)) > (60 * 5):
            print("{}: Deleting old lock file: {}".format(
                datetime.now(), lock_file))
            os.remove(lock_file)
    except Exception as ex:
        print_v(
            "Failed check and delete for lock file: {}. Error: {}".format(
                lock_file, ex))
    # Poll for an existing image or try to win the build lock (max ~5 min).
    wait_print = True
    for x in range(60):
        images_ls = run_command(' '.join([
            'docker', 'image', 'ls', '--format',
            '{{.Repository}}:{{.Tag}}', target_image
        ])).strip()
        if images_ls == target_image:
            print('{}: Using already existing docker image: {}'.format(
                datetime.now(), target_image))
            return target_image
        if wait_print:
            print(
                "{}: Existing image: {} not found will obtain lock file or wait for image"
                .format(datetime.now(), target_image))
            wait_print = False
        print_v("Trying to obtain lock file: " + lock_file)
        try:
            # mode "x" fails if the file exists - acts as an atomic lock.
            f = open(lock_file, "x")
            f.close()
            print("{}: Obtained lock file: {}".format(
                datetime.now(), lock_file))
            break
        except Exception as ex:
            print_v("Failed getting lock. Will wait {}".format(str(ex)))
            time.sleep(5)
    try:
        # try doing a pull
        try:
            print("{}: Trying to pull image: {}".format(
                datetime.now(), target_image))
            pull_res = subprocess.check_output(
                ['docker', 'pull', target_image],
                stderr=subprocess.STDOUT,
                universal_newlines=True)
            print("Pull succeeded with output: {}".format(pull_res))
            return target_image
        except subprocess.CalledProcessError as cpe:
            print_v(
                "Failed docker pull (will create image) with status: {}. Output: {}"
                .format(cpe.returncode, cpe.output))
        print(
            "{}: Creating docker image: {} (this may take a minute or two...)"
            .format(datetime.now(), target_image))
        update_cert = os.getenv('DEMISTO_LINT_UPDATE_CERTS', 'yes')
        docker_create = [
            'docker', 'create', '-e',
            f'DEMISTO_LINT_UPDATE_CERTS={update_cert}', '-i',
            docker_base_image, 'sh', '/' + setup_script_name
        ]
        print_v(f'running: {docker_create}')
        container_id = subprocess.check_output(
            docker_create, universal_newlines=True).strip()
        print_v(f'created container with id: {container_id}')
        # Copy the setup script (and optionally the CA cert for pwsh) into
        # the container, run it with the requirements on stdin, then
        # commit the result as the target image.
        subprocess.check_call([
            'docker', 'cp', setup_script,
            container_id + ':/' + setup_script_name
        ])
        if self.script_type == TYPE_PWSH:
            if update_cert == 'yes':
                subprocess.check_call([
                    'docker', 'cp', self.cert_file, container_id +
                    ':/usr/local/share/ca-certificates/custom.crt'
                ])
        print_v(
            subprocess.check_output(
                ['docker', 'start', '-a', '-i', container_id],
                input=docker_input,
                stderr=subprocess.STDOUT,
                universal_newlines=True))
        print_v(
            subprocess.check_output(
                ['docker', 'commit', container_id, target_image],
                stderr=subprocess.STDOUT,
                universal_newlines=True))
        print_v(
            subprocess.check_output(['docker', 'rm', container_id],
                                    stderr=subprocess.STDOUT,
                                    universal_newlines=True))
        # Share the built image so other CI runs can pull it.
        if self._docker_login():
            print("{}: Pushing image: {} to docker hub".format(
                datetime.now(), target_image))
            print_v(
                subprocess.check_output(['docker', 'push', target_image],
                                        stderr=subprocess.STDOUT,
                                        universal_newlines=True))
    except subprocess.CalledProcessError as err:
        print(
            "Failed executing command with error: {} Output: \n{}".format(
                err, err.output))
        raise err
    finally:
        try:
            os.remove(lock_file)
        except Exception as ex:
            print("{}: Error removing file: {}".format(datetime.now(), ex))
    print('{}: Done creating docker image: {}'.format(
        datetime.now(), target_image))
    return target_image
def __init__(self,
             project_dir: str,
             no_test: bool = False,
             no_pylint: bool = False,
             no_flake8: bool = False,
             no_mypy: bool = False,
             root: bool = False,
             keep_container: bool = False,
             cpu_num: int = 0,
             configuration: Configuration = Configuration(),
             lock: threading.Lock = threading.Lock(),
             no_bandit: bool = False,
             no_pslint: bool = False,
             requirements_3: str = '',
             requirements_2: str = '',
             no_vulture: bool = False):
    """Initialize a dev-task runner for the package at `project_dir`.

    Resolves the dev shell-script paths, normalizes the project dir and
    loads the package's yml to determine the script object and type.

    Raises:
        ValueError: when all --no-* flags are set (nothing to run), or
            when no yml file is found in the package directory.
    """
    if no_test and no_pylint and no_flake8 and no_mypy and no_bandit and no_vulture:
        raise ValueError("Nothing to run as all --no-* options specified.")
    self.configuration = configuration
    dev_scripts_dir = os.path.join(self.configuration.sdk_env_dir, 'common',
                                   'dev_sh_scripts')
    # File names of the helper shell scripts shipped with the sdk.
    self.run_dev_tasks_script_name = 'run_dev_tasks.sh'
    self.run_dev_tasks_script_pwsh_name = 'run_dev_tasks_pwsh.sh'
    self.run_mypy_script_name = 'run_mypy.sh'
    self.container_setup_script_name = 'pkg_dev_container_setup.sh'
    self.container_setup_script_pwsh_name = 'pkg_dev_container_setup_pwsh.sh'
    self.cert_file = os.path.join(dev_scripts_dir, 'panw-cert.crt')
    # Absolute paths to the scripts above.
    self.run_dev_tasks_script = os.path.join(
        dev_scripts_dir, self.run_dev_tasks_script_name)
    self.run_dev_tasks_script_pwsh = os.path.join(
        dev_scripts_dir, self.run_dev_tasks_script_pwsh_name)
    self.container_setup_script = os.path.join(
        dev_scripts_dir, self.container_setup_script_name)
    self.container_setup_script_pwsh = os.path.join(
        dev_scripts_dir, self.container_setup_script_pwsh_name)
    self.run_mypy_script = os.path.join(dev_scripts_dir,
                                        self.run_mypy_script_name)
    self.docker_login_completed = False
    self.project_dir = os.path.abspath(
        os.path.join(self.configuration.env_dir, project_dir))
    if self.project_dir[-1] != os.sep:
        # Normalize to a trailing separator for later path/string checks.
        self.project_dir = os.path.join(self.project_dir, '')
    self.root = root
    self.keep_container = keep_container
    self.cpu_num = cpu_num
    self.common_server_created = False
    # The CLI flags are negative ("--no-*"), so invert them into a
    # positive task map.
    self.run_args = {
        'pylint': not no_pylint,
        'flake8': not no_flake8,
        'mypy': not no_mypy,
        'bandit': not no_bandit,
        'tests': not no_test,
        'pslint': not no_pslint,
        'vulture': not no_vulture,
    }
    self.lock = lock
    self.requirements_3 = requirements_3
    self.requirements_2 = requirements_2
    # load yaml
    _, yml_path = get_yml_paths_in_dir(
        self.project_dir, Errors.no_yml_file(self.project_dir))
    if not yml_path:
        raise ValueError(f'yml path failed for: {self.project_dir}')
    print_v('Using yaml file: {}'.format(yml_path))
    with open(yml_path, 'r') as yml_file:
        yml_data = yaml.safe_load(yml_file)
    self.script_obj = yml_data
    if isinstance(self.script_obj.get('script'), dict):
        # Integrations nest the script object under a 'script' key.
        self.script_obj = self.script_obj.get('script')
    self.script_type = self.script_obj.get('type')
def _docker_image_create(self, docker_base_image, requirements):
    """Create the docker image with dev dependencies. Will check if already existing.
    Uses a hash of the requirements to determine the image tag

    Arguments:
        docker_base_image (string): docker image to use as base for installing dev deps
        requirements (string): requirements doc

    Returns:
        string. image name to use
    """
    if ':' not in docker_base_image:
        docker_base_image += ':latest'
    # The target tag hashes both the requirements and the setup script, so a change
    # in either produces a fresh image instead of reusing a stale one.
    with open(self.container_setup_script, "rb") as f:
        setup_script_data = f.read()
    md5 = hashlib.md5(requirements.encode('utf-8') + setup_script_data).hexdigest()
    target_image = 'devtest' + docker_base_image + '-' + md5
    # File-based lock: coordinates concurrent builders of the same image
    # (presumably parallel lint workers on this machine — confirm).
    lock_file = ".lock-" + target_image.replace("/", "-")
    try:
        # A lock older than 5 minutes is assumed stale (crashed builder) and removed.
        if (time.time() - os.path.getctime(lock_file)) > (60 * 5):
            print("{}: Deleting old lock file: {}".format(datetime.now(), lock_file))
            os.remove(lock_file)
    except Exception as ex:
        # Best effort: missing lock file or race with another process is fine.
        print_v("Failed check and delete for lock file: {}. Error: {}".format(lock_file, ex))
    wait_print = True
    # Poll up to 60 times (5s sleep on contention): either the image appears
    # (someone else built it) or we win the lock and build it ourselves.
    for x in range(60):
        images_ls = run_command(' '.join(['docker', 'image', 'ls', '--format',
                                          '{{.Repository}}:{{.Tag}}', target_image])).strip()
        if images_ls == target_image:
            print('{}: Using already existing docker image: {}'.format(datetime.now(), target_image))
            return target_image
        if wait_print:
            print("{}: Existing image: {} not found will obtain lock file or wait for image".format(
                datetime.now(), target_image))
            wait_print = False
        print_v("Trying to obtain lock file: " + lock_file)
        try:
            # mode "x" fails if the file exists -> atomic lock acquisition.
            f = open(lock_file, "x")
            f.close()
            print("{}: Obtained lock file: {}".format(datetime.now(), lock_file))
            break
        except Exception as ex:
            print_v("Failed getting lock. Will wait {}".format(str(ex)))
            time.sleep(5)
    try:
        # try doing a pull
        try:
            print("{}: Trying to pull image: {}".format(datetime.now(), target_image))
            pull_res = subprocess.check_output(['docker', 'pull', target_image],
                                               stderr=subprocess.STDOUT, universal_newlines=True)
            print("Pull succeeded with output: {}".format(pull_res))
            return target_image
        except subprocess.CalledProcessError as cpe:
            # Pull failure is expected when the image was never pushed; fall through to build.
            print_v("Failed docker pull (will create image) with status: {}. Output: {}".format(
                cpe.returncode, cpe.output))
        print("{}: Creating docker image: {} (this may take a minute or two...)".format(
            datetime.now(), target_image))
        # Build flow: create container running the setup script, copy the script in,
        # start it feeding the requirements on stdin, then commit the result as the image.
        container_id = run_command(' '.join(['docker', 'create', '-i', docker_base_image, 'sh',
                                             '/' + self.container_setup_script_name])).strip()
        subprocess.check_call(['docker', 'cp', self.container_setup_script,
                               container_id + ':/' + self.container_setup_script_name])
        print_v(subprocess.check_output(['docker', 'start', '-a', '-i', container_id],
                                        input=requirements, stderr=subprocess.STDOUT,
                                        universal_newlines=True))
        print_v(subprocess.check_output(['docker', 'commit', container_id, target_image],
                                        stderr=subprocess.STDOUT, universal_newlines=True))
        print_v(subprocess.check_output(['docker', 'rm', container_id],
                                        stderr=subprocess.STDOUT, universal_newlines=True))
        # Push so other machines/runs can pull instead of rebuilding (only if login succeeds).
        if self._docker_login():
            print("{}: Pushing image: {} to docker hub".format(datetime.now(), target_image))
            print_v(subprocess.check_output(['docker', 'push', target_image],
                                            stderr=subprocess.STDOUT, universal_newlines=True))
    except subprocess.CalledProcessError as err:
        print("Failed executing command with error: {} Output: \n{}".format(err, err.output))
        raise err
    finally:
        # Always release the lock file, even on failure, so other workers don't stall 5 min.
        try:
            os.remove(lock_file)
        except Exception as ex:
            print("{}: Error removing file: {}".format(datetime.now(), ex))
    print('{}: Done creating docker image: {}'.format(datetime.now(), target_image))
    return target_image
def file_uploader(self, path: str) -> int:
    """Upload a single file to the server.

    Args:
        path: The path of the file to upload. The rest of the parameters are taken from self.

    Returns:
        int. SUCCESS_RETURN_CODE on a successful upload, ERROR_RETURN_CODE otherwise
        (including version mismatch and unsupported file types).
    """
    try:
        upload_object: Union[YAMLObject, JSONObject] = path_to_pack_object(path)
    except ContentFactoryError:
        # The factory could not map this path to any known content object type.
        file_name = os.path.split(path)[-1]
        message = f"Cannot upload {path} as the file type is not supported for upload."
        if self.log_verbose:
            click.secho(message, fg='bright_red')
        self.failed_uploaded_files.append((file_name, "Unknown", message))
        return ERROR_RETURN_CODE

    file_name = upload_object.path.name  # type: ignore
    entity_type = find_type(str(upload_object.path))
    if entity_type in UPLOAD_SUPPORTED_ENTITIES:
        # Only attempt the upload when the server version falls inside the
        # file's declared fromversion..toversion range.
        if upload_object.from_version <= self.demisto_version <= upload_object.to_version:  # type: ignore
            try:
                result = upload_object.upload(self.client)  # type: ignore
                if self.log_verbose:
                    print_v(f'Result:\n{result.to_str()}', self.log_verbose)
                click.secho(f'Uploaded {entity_type} - \'{os.path.basename(path)}\': successfully', fg='green')
                self.successfully_uploaded_files.append((file_name, entity_type.value))
                return SUCCESS_RETURN_CODE
            except Exception as err:
                message = parse_error_response(err, entity_type, file_name, self.log_verbose)
                self.failed_uploaded_files.append((file_name, entity_type.value, message))
                return ERROR_RETURN_CODE
        else:
            if self.log_verbose:
                click.secho(
                    f"Input path {path} is not uploading due to version mismatch.\n"
                    f"XSOAR version is: {self.demisto_version} while the file's version is "
                    f"{upload_object.from_version} - {upload_object.to_version}", fg='bright_red')
            self.unuploaded_due_to_version.append(
                (file_name, entity_type.value, self.demisto_version,
                 upload_object.from_version, upload_object.to_version))
            return ERROR_RETURN_CODE
    else:
        if self.log_verbose:
            click.secho(
                f'\nError: Given input path: {path} is not uploadable. '
                f'Input path should point to one of the following:\n'
                f' 1. Pack\n'
                f' 2. A content entity directory that is inside a pack. For example: an Integrations directory or '
                f'a Layouts directory\n'
                f' 3. Valid file that can be imported to Cortex XSOAR manually. '
                f'For example a playbook: helloWorld.yml', fg='bright_red')
        # find_type may return a falsy result for an unrecognized file (which is
        # exactly how we got here) - guard against dereferencing .value on None.
        entity_value = entity_type.value if entity_type else "Unknown"
        # Fixed typo in user-facing reason string ('Unsuported' -> 'Unsupported').
        self.failed_uploaded_files.append((file_name, entity_value, 'Unsupported file path/type'))
        return ERROR_RETURN_CODE