def test_instances(secret_conf_path, server, username, password):
    """Create an instance for every integration listed in the secret conf and report failures.

    Args:
        secret_conf_path (str): Path to the secret configuration file.
        server (str): Demisto server URL.
        username (str): Login username.
        password (str): Login password.

    Returns:
        tuple. (list of failed-integration descriptions, total number of integrations seen).
    """
    client = get_demisto_instance_and_login(server, username, password)

    created_instance_ids = []
    failed_integration = []
    integrations_counter = 0
    for integration in get_integrations(secret_conf_path):
        integrations_counter += 1
        integration_name = integration.get('name', None)
        integration_params = integration.get('params', None)
        devops_comments = integration.get('devops_comments', None)
        product_description = integration.get('product_description', None)
        is_byoi = integration.get('byoi', True)

        # Skip entries that have no actual integration to instantiate.
        if not integration.get('has_integration', True):
            continue

        instance_id = __create_integration_instance(client, integration_name, integration_params, is_byoi)
        if instance_id:
            created_instance_ids.append(instance_id)
            print('Create integration %s succeed' % (integration_name,))
        else:
            print_error('Failed to create instance of %s' % (integration_name,))
            failed_integration.append("{0} {1} - {2}".format(integration_name, product_description, devops_comments))

    __delete_integrations_instances(client, created_instance_ids)
    return failed_integration, integrations_counter
def is_valid_in_id_set(self, file_path, obj_data, obj_set):
    """Check if the file is represented correctly in the id_set

    Args:
        file_path (string): Path to the file.
        obj_data (dict): Dictionary that holds the extracted details from the given file.
        obj_set (set): The set in which the file should be located at.

    Returns:
        bool. Whether the file is represented correctly in the id_set or not.
    """
    is_found = False
    # FIX: dict.keys()[0] raises TypeError on Python 3 (views aren't indexable);
    # next(iter(...)) is equivalent and works on both Python 2 and 3.
    file_id = next(iter(obj_data))
    # The compared version range is a property of the file, not of the id_set
    # entry, so it can be computed once before the loop.
    obj_to_version = obj_data[file_id].get('toversion', '99.99.99')
    obj_from_version = obj_data[file_id].get('fromversion', '0.0.0')

    for checked_instance in obj_set:
        checked_instance_id = next(iter(checked_instance))
        checked_instance_data = checked_instance[checked_instance_id]
        checked_instance_toversion = checked_instance_data.get('toversion', '99.99.99')
        checked_instance_fromversion = checked_instance_data.get('fromversion', '0.0.0')

        if checked_instance_id == file_id and checked_instance_toversion == obj_to_version and \
                checked_instance_fromversion == obj_from_version:
            is_found = True
            # Same id and same version range must carry identical data.
            if checked_instance_data != obj_data[file_id]:
                print_error("You have failed to update id_set.json with the data of {} "
                            "please run `python Tests/scripts/update_id_set.py`".format(file_path))
                return False

    if not is_found:
        print_error("You have failed to update id_set.json with the data of {} "
                    "please run `python Tests/scripts/update_id_set.py`".format(file_path))

    return is_found
def __delete_integration_instance(client, instance_id):
    """Delete a single integration instance; return True on success, False otherwise."""
    response = client.req('DELETE', '/settings/integration/' + urllib.quote(instance_id), {})
    if response.status_code == 200:
        return True
    print_error('delete integration instance failed\nStatus code' + str(response.status_code))
    print_error(pformat(response.json()))
    return False
def __get_integration_config(client, integration_name):
    """Fetch the configuration object of an integration by name, polling until available.

    Polls for up to TIMEOUT seconds because configurations may not be available
    immediately after a server (re)start.

    Args:
        client: Demisto client used to issue the search request.
        integration_name (str): Name of the integration to look up.

    Returns:
        dict. The matching configuration, or None on timeout / no match.
    """
    TIMEOUT = 180
    SLEEP_INTERVAL = 5

    def _search():
        # one search round-trip, decoded to a dict
        return client.req('POST', '/settings/integration/search', {
            'page': 0, 'size': 100, 'query': 'name:' + integration_name
        }).json()

    res = _search()
    total_sleep = 0
    while 'configurations' not in res:
        if total_sleep == TIMEOUT:
            print_error("Timeout - failed to get integration {} configuration. Error: {}".format(integration_name,
                                                                                                 res))
            return None
        time.sleep(SLEEP_INTERVAL)
        total_sleep += SLEEP_INTERVAL
        # BUG FIX: the original never re-issued the request inside the loop, so
        # `res` could never gain a 'configurations' key and the loop always ran
        # all the way to the timeout. Re-query on every retry.
        res = _search()

    all_configurations = res['configurations']
    match_configurations = [x for x in all_configurations if x['name'] == integration_name]
    if not match_configurations:
        print_error('integration was not found')
        return None

    return match_configurations[0]
def get_demisto_instance_and_login(server, username, password):
    """Build a Demisto client and log in, exiting the process if login fails."""
    client = demisto.DemistoClient(None, server, username, password)
    login_response = client.Login()
    if login_response.status_code != 200:
        print_error("Login has failed with status code " + str(login_response.status_code))
        sys.exit(1)
    return client
def is_docker_image_changed(self):
    """Check if the Docker image was changed or not."""
    old_image = self.old_integration.get('script', {}).get('dockerimage', "")
    new_image = self.current_integration.get('script', {}).get('dockerimage', "")
    if old_image == new_image:
        return False
    print_error("Possible backwards compatibility break, You've changed the docker for the file {}"
                " this is not allowed.".format(self.file_path))
    self._is_valid = False
    return True
def is_docker_image_changed(self):
    """Check if the docker image as been changed."""
    # raw strings carry the same bytes as the originals; they just silence escape warnings
    docker_added = re.search(r"\+([ ]+)?dockerimage: .*", self.change_string)
    docker_deleted = re.search(r"-([ ]+)?dockerimage: .*", self.change_string)
    if not (docker_added or docker_deleted):
        return False
    print_error("Possible backwards compatibility break, You've changed the docker for the file {}"
                " this is not allowed.".format(self.file_path))
    self._is_valid = False
    return True
def load_id_set(self):
    """Load and return the parsed id_set.json.

    Prints a helpful hint when the JSON is corrupted in the way a bad merge
    typically leaves it, then re-raises the original error.

    Returns:
        dict. The parsed content of id_set.json.
    """
    with open(self.ID_SET_PATH, 'r') as id_set_file:
        try:
            id_set = json.load(id_set_file)
        except ValueError as ex:
            # FIX: ex.message is Python-2-only and raises AttributeError on
            # Python 3; str(ex) yields the message on both versions.
            if "Expecting property name" in str(ex):
                print_error("You probably merged from master and your id_set.json has conflicts. "
                            "Run `python Tests/scripts/update_id_set.py -r`, it should reindex your id_set.json")
            raise

    return id_set
def __delete_incident(client, incident):
    """Delete one incident via the batchDelete endpoint; return True on success."""
    request_body = {
        'ids': [incident['id']],
        'filter': {},
        'all': False
    }
    response = client.req('POST', '/incident/batchDelete', request_body)
    if response.status_code == 200:
        return True
    print_error('delete incident failed\nStatus code' + str(response.status_code))
    print_error(pformat(response.json()))
    return False
def __test_integration_instance(client, module_instance):
    """Run the integration's 'Test' button server-side; return True if it passed."""
    response = client.req('POST', '/settings/integration/test', module_instance)
    if response.status_code != 200:
        print_error('Integration-instance test ("Test" button) failed.\nBad status code: ' + str(response.status_code))
        return False

    result_object = response.json()
    if not result_object['success']:
        print_error('Test integration failed.\n Failure message: ' + result_object['message'])
        return False

    return True
def __disable_integrations_instances(client, module_instances):
    """Disable every configured integration instance through the settings API."""
    # tested with POSTMAN, this is the minimum required fields for the request.
    required_keys = ('id', 'brand', 'name', 'data', 'isIntegrationScript')
    for configured_instance in module_instances:
        payload = {key: configured_instance[key] for key in required_keys}
        payload['enable'] = "false"
        payload['version'] = -1

        response = client.req('PUT', '/settings/integration', payload)
        if response.status_code != 200:
            print_error('disable instance failed with status code ' + str(response.status_code))
            print_error(pformat(response.json()))
def __get_integration_config(client, integration_name):
    """Return the configuration whose name matches integration_name, or None if absent."""
    search_body = {
        'page': 0,
        'size': 100,
        'query': 'name:' + integration_name
    }
    search_result = client.req('POST', '/settings/integration/search', search_body).json()

    matches = [config for config in search_result['configurations'] if config['name'] == integration_name]
    if not matches:
        print_error('integration was not found')
        return None
    return matches[0]
def is_added_required_fields(self):
    """Check if required field were added."""
    current_field_to_required = self._get_field_to_required_dict(self.current_integration)
    old_field_to_required = self._get_field_to_required_dict(self.old_integration)

    for field, required in current_field_to_required.items():
        # new required field, or an existing field that became required
        newly_added = field not in old_field_to_required and required
        became_required = required and field in old_field_to_required and required != old_field_to_required[field]
        if newly_added or became_required:
            print_error("You've added required fields in the integration "
                        "file '{}', the field is '{}'".format(self.file_path, field))
            self._is_valid = False
            return True

    return False
def is_changed_command_name_or_arg(self):
    """Check if a command name or argument as been changed.

    Returns:
        bool. Whether a command name or argument as been changed.
    """
    current_command_to_args = self._get_command_to_args(self.current_integration)
    old_command_to_args = self._get_command_to_args(self.old_integration)

    for command, args_dict in old_command_to_args.items():
        # the old args must still exist under the same command name
        still_compatible = command in current_command_to_args and \
            self.is_subset_dictionary(current_command_to_args[command], args_dict)
        if still_compatible:
            continue
        print_error("Possible backwards compatibility break, You've changed the name of a command or its arg in"
                    " the file {0} please undo, the command was:\n{1}".format(self.file_path, command))
        self._is_valid = False
        return True

    return False
def get_test_list(files_string, branch_name):
    """Create a test list that should run"""
    modified_files, modified_tests_list, all_tests, is_conf_json, sample_tests, is_reputations_json = \
        get_modified_files(files_string)

    tests = set([])
    if modified_files:
        tests = find_tests_for_modified_files(modified_files)

    # Adding a unique test for a json file.
    if is_reputations_json:
        tests.add('reputations.json Test')

    # sets silently ignore re-adds, so no membership check is needed
    for file_path in modified_tests_list:
        tests.add(collect_ids(file_path))

    if is_conf_json:
        tests = tests.union(get_test_from_conf(branch_name))

    if all_tests:
        print_warning('Running all tests due to: {}'.format(','.join(all_tests)))
        tests.add("Run all tests")

    if sample_tests:
        # Choosing 3 random tests for infrastructure testing
        print_warning('Collecting sample tests due to: {}'.format(','.join(sample_tests)))
        test_ids = get_test_ids(check_nightly_status=True)
        # seeded so the same diff + branch always picks the same sample
        rand = random.Random(files_string + branch_name)
        while len(tests) < 3:
            tests.add(rand.choice(test_ids))

    if not tests:
        if modified_files or modified_tests_list or all_tests:
            print_error("There are no tests that check the changes you've done, please make sure you write one")
            global _FAILED
            _FAILED = True
        else:
            print_warning("Running Sanity check only")
            tests.add('DocumentationTest')  # test with integration configured
            tests.add('TestCommonPython')  # test with no integration configured

    return tests
def is_changed_context_path(self):
    """Check if a context path as been changed.

    Returns:
        bool. Whether a context path as been changed.
    """
    current_command_to_context_paths = self._get_command_to_context_paths(self.current_integration)
    old_command_to_context_paths = self._get_command_to_context_paths(self.old_integration)

    for old_command, old_context_paths in old_command_to_context_paths.items():
        if old_command not in current_command_to_context_paths:
            continue
        if self._is_sub_set(current_command_to_context_paths[old_command], old_context_paths):
            continue
        print_error("Possible backwards compatibility break, You've changed the context in the file {0} please "
                    "undo, the command is:\n{1}".format(self.file_path, old_command))
        self._is_valid = False
        return True

    return False
def main(argv):
    """Strip releaseNotes from content folders for the given server version.

    Args:
        argv (list): [root_dir, current_server_version].
    """
    if len(argv) < 2:
        print_error("<Server version>")
        sys.exit(1)
    root_dir = argv[0]
    current_server_version = argv[1]

    # (glob pattern, folders that hold files of that format) — yml first, json second
    scan_plan = (
        ('*.yml', ['Integrations', 'Playbooks', 'Scripts', 'TestPlaybooks']),
        ('*.json', ['Reports', 'Misc', 'Dashboards', 'Widgets', 'Classifiers', 'Layouts', 'IncidentFields']),
    )
    for pattern, folders in scan_plan:
        for folder in folders:
            print('Scanning directory: "{}"'.format(folder))
            remove_releaseNotes_folder(os.path.join(root_dir, folder), pattern, current_server_version)
def restart_demisto_service(ami, c):
    """Restart the demisto service on the AMI and wait until login succeeds or timeout."""
    ami.check_call(['sudo', 'service', 'demisto', 'restart'])
    exit_code = 1
    for _ in range(0, SERVICE_RESTART_TIMEOUT, SERVICE_RESTART_POLLING_INTERVAL):
        sleep(SERVICE_RESTART_POLLING_INTERVAL)
        if exit_code != 0:
            exit_code = ami.call(['/usr/sbin/service', 'demisto', 'status', '--lines', '0'])
        if exit_code != 0:
            continue
        print("{}: Checking login to the server...".format(datetime.now()))
        try:
            res = c.Login()
            if res.status_code == 200:
                return
            print("Failed verifying login (will retry). status: {}. text: {}".format(res.status_code, res.text))
        except Exception as ex:
            print_error("Failed verifying server start via login: {}".format(ex))
    raise Exception('Timeout waiting for demisto service to restart')
def validate_file_release_notes(self):
    """Validate that the file has proper release notes when modified.

    This function updates the class attribute self._is_valid instead of passing
    it back and forth.
    """
    if self.is_renamed:
        print_warning("You might need RN please make sure to check that.")
        return
    if not os.path.isfile(self.file_path):
        return

    rn_path = get_release_notes_file_path(self.file_path)
    # check rn file exists and contain text
    if get_latest_release_notes_text(rn_path) is None:
        print_error('File {} is missing releaseNotes, Please add it under {}'.format(self.file_path, rn_path))
        self._is_valid = False
def __init__(self, file_path, check_git=True, old_file_path=None):
    """Load the current file and its old (master) counterpart for comparison.

    Args:
        file_path (str): Path to the file being validated.
        check_git (bool): Whether to fetch the old version for backward-compatibility checks.
        old_file_path (str): Original path when the file was renamed, if any.
    """
    self._is_valid = True
    self.file_path = file_path
    if check_git:
        self.current_integration = get_json(file_path)
        # The replace in the end is for Windows support
        if old_file_path:
            git_hub_path = os.path.join(self.CONTENT_GIT_HUB_LINK, old_file_path).replace("\\", "/")
            file_content = get(git_hub_path).content
            # FIX: use safe_load — bare yaml.load without an explicit Loader is
            # deprecated and allows arbitrary object construction on fetched
            # content; sibling loaders in this file already use safe loading.
            self.old_integration = yaml.safe_load(file_content)
        else:
            try:
                file_path_from_master = os.path.join(self.CONTENT_GIT_HUB_LINK, file_path).replace("\\", "/")
                self.old_integration = yaml.safe_load(get(file_path_from_master).content)
            except Exception:
                print_error("Could not find the old integration please make sure that you did not break "
                            "backward compatibility")
                self.old_integration = None
def is_isarray_arguments(self):
    """Check if a reputation command's (domain/email/file/ip/url) argument of the same name
    has the 'isArray' attribute set to True

    Returns:
        bool. Whether 'isArray' is True
    """
    for command in self.current_integration.get('script', {}).get('commands', []):
        command_name = command.get('name')
        if command_name not in REPUTATION_COMMANDS:
            continue
        for arg in command.get('arguments', []):
            arg_name = arg.get('name')
            # only the argument named after the command must be an array
            if arg_name == command_name and arg.get('isArray') is False:
                self._is_valid = False
                print_error("The argument '{}' of the command '{}' is not configured with 'isArray' set to True"
                            .format(arg_name, command_name))
    return self._is_valid
def is_default_arguments(self):
    """Check if a reputation command (domain/email/file/ip/url) has a default non required
    argument with the same name

    Returns:
        bool. Whether a reputation command hold a valid argument
    """
    reputation_names = {'file', 'email', 'domain', 'url', 'ip'}
    for command in self.current_integration.get('script', {}).get('commands', []):
        command_name = command.get('name')
        for arg in command.get('arguments', []):
            arg_name = arg.get('name')
            is_same_named_reputation_arg = command_name in reputation_names and arg_name == command_name
            if is_same_named_reputation_arg and arg.get('default') is False:
                self._is_valid = False
                print_error("The argument '{}' of the command '{}' is not configured as default"
                            .format(arg_name, command_name))
    return self._is_valid
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id):
    """Fill in params/byoi/instance_name for each integration from the secret configuration.

    Args:
        demisto_api_key (str): API key used for the built-in Demisto REST API integration.
        integrations (list): Integration dicts to populate (mutated in place).
        secret_params (list): Candidate parameter sets from the secret conf.
        instance_names (list): Instance names the test expects to use.
        playbook_id (str): Playbook id, used only for the failure message.

    Returns:
        bool. False when several candidate param sets exist and none matches the
        expected instance names; True otherwise.
    """
    for integration in integrations:
        candidates = [item for item in secret_params if item['name'] == integration['name']]
        if candidates:
            chosen = candidates[0]
            if len(candidates) != 1:
                # several credential sets — pick the one matching an expected instance name
                found_matching_instance = False
                for candidate in candidates:
                    if candidate.get('instance_name', 'Not Found') in instance_names:
                        chosen = candidate
                        found_matching_instance = True
                if not found_matching_instance:
                    optional_instance_names = [optional_integration.get('instance_name', 'None')
                                               for optional_integration in candidates]
                    print_error(FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(candidates),
                                                                 integration['name'],
                                                                 '\n'.join(optional_instance_names)))
                    return False
            integration['params'] = chosen.get('params', {})
            integration['byoi'] = chosen.get('byoi', True)
            integration['instance_name'] = chosen.get('instance_name', integration['name'])
        elif 'Demisto REST API' == integration['name']:
            integration['params'] = {
                'url': 'https://localhost',
                'apikey': demisto_api_key,
                'insecure': True,
            }
    return True
def __init__(self, file_path, check_git=True, old_file_path=None):
    """Load the current integration yml and its old (master) counterpart for comparison.

    Args:
        file_path (str): Path to the integration yml being validated.
        check_git (bool): Whether to fetch the old version for backward-compatibility checks.
        old_file_path (str): Original path when the file was renamed, if any.
    """
    self._is_valid = True
    self.file_path = file_path
    if check_git:
        self.current_integration = get_yaml(file_path)
        # The replace in the end is for Windows support
        if old_file_path:
            git_hub_path = os.path.join(self.CONTENT_GIT_HUB_LINK, old_file_path).replace("\\", "/")
            file_content = get(git_hub_path).content
            # CONSISTENCY FIX: the else-branch below passes Loader=yaml.FullLoader
            # but this branch used bare yaml.load, which is deprecated and unsafe
            # on fetched content — use the same explicit Loader here.
            self.old_integration = yaml.load(file_content, Loader=yaml.FullLoader)
        else:
            try:
                file_path_from_master = os.path.join(self.CONTENT_GIT_HUB_LINK, file_path).replace("\\", "/")
                self.old_integration = yaml.load(get(file_path_from_master).content, Loader=yaml.FullLoader)
            except Exception:
                print_error("Could not find the old integration please make sure that you did not break "
                            "backward compatibility")
                self.old_integration = None
def is_context_path_changed(self):
    """Check if the context path as been changed."""
    # raw strings: same pattern bytes as the originals, no escape warnings
    removed = [match[1] for match in re.findall(r"-([ ]+)?- contextPath: (.*)", self.change_string)]
    added = [match[1] for match in re.findall(r"\+([ ]+)?- contextPath: (.*)", self.change_string)]

    # a path that was both removed and re-added merely moved — cancel it out
    for added_path in added:
        if added_path in removed:
            removed.remove(added_path)

    if not removed:
        return False
    print_error("Possible backwards compatibility break, You've changed the context in the file {0} please "
                "undo, the line was:{1}".format(self.file_path, "\n".join(removed)))
    self._is_valid = False
    return True
def is_id_duplicated(self, obj_id, obj_data, obj_type):
    """Check if the given ID already exist in the system.

    Args:
        obj_id (string): The new ID we want to add.
        obj_data (dict): Dictionary that holds the extracted details from the given file.
        obj_type (string): the type of the new file.

    Returns:
        bool. Whether the ID already exist in the system or not.
    """
    is_duplicated = False
    # FIX: dict.values()[0] / dict.keys()[0] raise TypeError on Python 3;
    # next(iter(...)) is equivalent and works on both Python 2 and 3.
    dict_value = next(iter(obj_data.values()))
    obj_toversion = dict_value.get('toversion', '99.99.99')
    obj_fromversion = dict_value.get('fromversion', '0.0.0')

    for section, section_data in self.id_set.items():
        for instance in section_data:
            instance_id = next(iter(instance))
            instance_to_version = instance[instance_id].get('toversion', '99.99.99')
            instance_from_version = instance[instance_id].get('fromversion', '0.0.0')

            if obj_id == instance_id:
                # same id appearing under a different section is always a clash
                if section != obj_type:
                    is_duplicated = True
                    break
                # identical version range must carry identical data
                elif obj_fromversion == instance_from_version and obj_toversion == instance_to_version:
                    if instance[instance_id] != obj_data[obj_id]:
                        is_duplicated = True
                        break
                # overlapping version ranges clash as well
                elif (LooseVersion(obj_fromversion) <= LooseVersion(instance_to_version) and
                        (LooseVersion(obj_toversion) >= LooseVersion(instance_from_version))):
                    is_duplicated = True
                    break

    if is_duplicated:
        print_error("The ID {0} already exists, please update the file or update the "
                    "id_set.json toversion field of this id to match the "
                    "old occurrence of this id".format(obj_id))

    return is_duplicated
def is_id_duplicated(self, obj_id, obj_data, obj_type):
    """Check if the given ID already exist in the system.

    Args:
        obj_id (string): The new ID we want to add.
        obj_data (dict): Dictionary that holds the extracted details from the given file.
        obj_type (string): the type of the new file.

    Returns:
        bool. Whether the ID already exist in the system or not.
    """
    is_duplicated = False
    # FIX: dict.values()[0] / dict.keys()[0] raise TypeError on Python 3;
    # next(iter(...)) is equivalent and works on both Python 2 and 3.
    dict_value = next(iter(obj_data.values()))
    obj_toversion = dict_value.get('toversion', '99.99.99')
    obj_fromversion = dict_value.get('fromversion', '0.0.0')

    for section, section_data in self.id_set.items():
        for instance in section_data:
            instance_id = next(iter(instance))
            instance_to_version = instance[instance_id].get('toversion', '99.99.99')
            instance_from_version = instance[instance_id].get('fromversion', '0.0.0')

            if obj_id == instance_id:
                # same id under a different section clashes only when the version windows can coexist
                if section != obj_type and LooseVersion(obj_fromversion) < LooseVersion(instance_to_version):
                    is_duplicated = True
                    break
                # identical version range must carry identical data
                elif obj_fromversion == instance_from_version and obj_toversion == instance_to_version:
                    if instance[instance_id] != obj_data[obj_id]:
                        is_duplicated = True
                        break
                # overlapping version ranges clash as well
                elif (LooseVersion(obj_fromversion) <= LooseVersion(instance_to_version) and
                        (LooseVersion(obj_toversion) >= LooseVersion(instance_from_version))):
                    is_duplicated = True
                    break

    if is_duplicated:
        print_error("The ID {0} already exists, please update the file or update the "
                    "id_set.json toversion field of this id to match the "
                    "old occurrence of this id".format(obj_id))

    return is_duplicated
def get_secrets(branch_name, is_circle):
    """Search the branch diff for potential secrets and print a report when found.

    Returns:
        dict. Mapping of file name to the secrets detected in it (empty when clean).
    """
    secrets_found = {}
    # make sure not in middle of merge
    if not run_command('git rev-parse -q --verify MERGE_HEAD'):
        secrets_file_paths = get_all_diff_text_files(branch_name, is_circle)
        secrets_found = search_potential_secrets(secrets_file_paths)
        if secrets_found:
            # build the report with join instead of repeated concatenation
            report_parts = ['Secrets were found in the following files:\n']
            for file_name in secrets_found:
                report_parts.append('\nFile Name: ' + file_name)
                report_parts.append(json.dumps(secrets_found[file_name], indent=4))
            if not is_circle:
                report_parts.append('\nRemove or whitelist secrets in order to proceed, then re-commit\n')
            else:
                report_parts.append('The secrets were exposed in public repository,'
                                    ' remove the files asap and report it.\n')
            report_parts.append('For more information about whitelisting visit: '
                                'https://github.com/demisto/internal-content/tree/master/documentation/secrets')
            print_error(''.join(report_parts))
    return secrets_found
def __init__(self, file_path, check_git=True, old_file_path=None):
    """Store the current and old (master) versions of a script for comparison."""
    self.file_path = file_path
    self.current_script = {}
    self.old_script = {}
    if not check_git:
        return

    self.current_script = get_yaml(file_path)
    # The replace in the end is for Windows support
    source_path = old_file_path if old_file_path else file_path
    git_hub_path = os.path.join(CONTENT_GITHUB_MASTER_LINK, source_path).replace("\\", "/")
    try:
        file_content = requests.get(git_hub_path, verify=False).content
        self.old_script = yaml.safe_load(file_content)
    except Exception as e:
        print(str(e))
        print_error("Could not find the old script please make sure that you did not break "
                    "backward compatibility")
def restart_demisto_service(ami, demisto_api_key, server):
    """Restart the demisto service on the AMI and wait until /health answers 200 or timeout.

    Args:
        ami: Remote machine wrapper exposing check_call/call.
        demisto_api_key (str): API key for the demisto client.
        server (str): Server base URL.

    Raises:
        Exception: When the service does not come back within SERVICE_RESTART_TIMEOUT.
    """
    client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
    ami.check_call(['sudo', 'service', 'demisto', 'restart'])
    exit_code = 1
    for _ in range(0, SERVICE_RESTART_TIMEOUT, SERVICE_RESTART_POLLING_INTERVAL):
        sleep(SERVICE_RESTART_POLLING_INTERVAL)
        if exit_code != 0:
            exit_code = ami.call(['/usr/sbin/service', 'demisto', 'status', '--lines', '0'])
        if exit_code == 0:
            print("{}: Checking login to the server... ".format(datetime.now()))
            try:
                res = demisto_client.generic_request_func(self=client, path='/health', method='GET')
                if int(res[1]) == 200:
                    return
                else:
                    # BUG FIX: generic_request_func returns a (body, status, headers)
                    # tuple — the original res.status_code / res.text raised
                    # AttributeError here and masked the real retry message.
                    print("Failed verifying login (will retry). status: {}. text: {}".format(res[1], res[0]))
            except Exception as ex:
                print_error("Failed verifying server start via login: {}".format(ex))
    raise Exception('Timeout waiting for demisto service to restart')
def is_there_duplicate_args(self):
    """Check if a command has the same arg more than once

    Returns:
        bool. True if there are duplicates, False otherwise.
    """
    for command in self.current_integration.get('script', {}).get('commands', []):
        seen_args = []
        for arg in command.get('arguments', []):
            # whole-dict comparison: two identical argument definitions count as a duplicate
            if arg in seen_args:
                self._is_valid = False
                print_error("The argument '{}' of the command '{}' is duplicated in the integration '{}', "
                            "please remove one of its appearances "
                            "as we do not allow duplicates".format(arg, command['name'],
                                                                   self.current_integration.get('name')))
            else:
                seen_args.append(arg)
    return not self._is_valid
def __print_investigation_error(client, playbook_id, investigation_id, color=LOG_COLORS.RED):
    """Fetch an investigation's entries and print its failed tasks in the given color.

    Args:
        client: Configured demisto_client instance.
        playbook_id (str): Playbook name, used in the printed header.
        investigation_id (str): Investigation to fetch entries for.
        color: Output color (defaults to red).
    """
    # BUG FIX: res was previously unbound when the request raised, so the
    # `if res ...` check below crashed with a NameError instead of degrading
    # gracefully after the printed error.
    res = None
    try:
        empty_json = {"pageSize": 1000}
        res = demisto_client.generic_request_func(self=client, method='POST',
                                                  path='/investigation/' + urllib.quote(investigation_id),
                                                  body=empty_json)
    except requests.exceptions.RequestException as conn_err:
        print_error('Failed to print investigation error, error trying to communicate with demisto '
                    'server: {} '.format(conn_err))
    if res and int(res[1]) == 200:
        resp_json = ast.literal_eval(res[0])
        entries = resp_json['entries']
        print_color('Playbook ' + playbook_id + ' has failed:', color)
        for entry in entries:
            if entry['type'] == ENTRY_TYPE_ERROR and entry['parentContent']:
                print_color('- Task ID: ' + entry['taskId'].encode('utf-8'), color)
                print_color(' Command: ' + entry['parentContent'].encode('utf-8'), color)
                print_color(' Body:\n' + entry['contents'].encode('utf-8') + '\n', color)
def is_there_duplicate_params(self):
    """Check if the integration has the same param more than once

    Returns:
        bool. True if there are duplicates, False otherwise.
    """
    seen_param_names = []
    for configuration_param in self.current_integration.get('configuration', []):
        param_name = configuration_param['name']
        if param_name in seen_param_names:
            self._is_valid = False
            print_error("The parameter '{}' of the "
                        "integration '{}' is duplicated, please remove one of its appearances as we do not "
                        "allow duplicated parameters".format(param_name, self.current_integration.get('name')))
        else:
            seen_param_names.append(param_name)
    return not self._is_valid
def is_valid_subtype(self):
    """Validate that the subtype is python2 or python3."""
    if self.current_integration.get('script', {}).get('type') != 'python':
        return self._is_valid

    subtype = self.current_integration.get('script', {}).get('subtype')
    # `not in` also rejects a missing/None subtype
    if subtype not in ('python3', 'python2'):
        print_error("The subtype for our yml files should be either python2 or python3, "
                    "please update the file {}.".format(self.current_integration.get('name')))
        self._is_valid = False

    if self.old_integration:
        old_subtype = self.old_integration.get('script', {}).get('subtype', "")
        if old_subtype and old_subtype != subtype:
            print_error("Possible backwards compatibility break, You've changed the subtype"
                        " of the file {}".format(self.file_path))
            self._is_valid = False

    return self._is_valid
def get_modified_files(files_string):
    """Get lists of the modified files in your branch according to the files string.

    Args:
        files_string (string): String that was calculated by git using `git diff` command.

    Returns:
        (modified_files_list, added_files_list, deleted_files). Tuple of sets.
    """
    deleted_files = set([])
    added_files_list = set([])
    modified_files_list = set([])

    for diff_line in files_string.split('\n'):
        file_data = diff_line.split()
        if not file_data:
            continue

        file_status = file_data[0]
        file_path = file_data[1]
        # code files are never collected here
        if file_path.endswith('.js') or file_path.endswith('.py'):
            continue

        status = file_status.lower()
        if status == 'm' and checked_type(file_path) and not file_path.startswith('.'):
            modified_files_list.add(file_path)
        elif status == 'a' and checked_type(file_path) and not file_path.startswith('.'):
            added_files_list.add(file_path)
        elif status == 'd' and checked_type(file_path) and not file_path.startswith('.'):
            deleted_files.add(file_path)
        elif status.startswith('r') and checked_type(file_path):
            # rename entries keep (old_path, new_path)
            modified_files_list.add((file_data[1], file_data[2]))
        elif status not in KNOWN_FILE_STATUSES:
            print_error(file_path + " file status is an unknown known one, "
                        "please check. File status was: " + file_status)

    return modified_files_list, added_files_list, deleted_files
def update_content_on_demisto_instance(client, username, password, server):
    '''Try to update the content

    Args:
        client (demisto_client): The configured client to use.
        username (str): The username to pass to Tests/update_content_data.py
        password (str): The password to pass to Tests/update_content_data.py
        server (str): The server url to pass to Tests/update_content_data.py
    '''
    content_zip_path = 'artifacts/all_content.zip'
    update_cmd = 'python Tests/update_content_data.py -u {} -p {} -s {} --content_zip {}'.format(
        username, password, server, content_zip_path)
    run_command(update_cmd, is_silenced=False)

    # Check if content update has finished installing
    sleep_interval = 20
    updating_content = is_content_update_in_progress(client)
    while updating_content.lower() == 'true':
        sleep(sleep_interval)
        updating_content = is_content_update_in_progress(client)

    if updating_content.lower() == 'request unsuccessful':
        # since the request to check if content update installation finished didn't work, can't use that mechanism
        # to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
        sleep(30)
        return

    # check that the content installation updated
    # verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
    release, asset_id = get_content_version_details(client)
    with open('content-descriptor.json', 'r') as cd_file:
        cd_json = json.loads(cd_file.read())
    cd_release = cd_json.get('release')
    cd_asset_id = cd_json.get('assetId')

    if release == cd_release and asset_id == cd_asset_id:
        print_color('Content Update Successfully Installed!', color=LOG_COLORS.GREEN)
        return

    err_details = 'Attempted to install content with release "{}" and assetId '.format(cd_release)
    err_details += '"{}" but release "{}" and assetId "{}" were '.format(cd_asset_id, release, asset_id)
    err_details += 'retrieved from the instance post installation.'
    print_error('Content Update was Unsuccessful:\n{}'.format(err_details))
    sys.exit(1)
def is_arg_changed(self):
    """Check if the argument has been changed."""
    # raw strings: identical pattern bytes to the originals, no escape warnings
    removed = [match[1] for match in re.findall(r"-([ ]+)?- name: (.*)", self.change_string)]
    added = [match[1] for match in re.findall(r"\+([ ]+)?- name: (.*)", self.change_string)]

    # names both removed and re-added merely moved — cancel them out
    for added_name in added:
        if added_name in removed:
            removed.remove(added_name)

    if not removed:
        return False
    print_error("Possible backwards compatibility break, You've changed the name of a command or its arg in"
                " the file {0} please undo, the line was:{1}".format(self.file_path, "\n".join(removed)))
    self._is_valid = False
    return True
def is_docker_image_latest_tag(self):
    """Check that the docker image uses the latest available tag; updates self.is_latest_tag."""
    # If the docker image isn't in the format we expect it to be
    if not (self.docker_image_tag or self.docker_image_name or self.docker_image_latest_tag):
        self.is_latest_tag = False
    else:
        server_version = LooseVersion(self.from_version)
        full_image = '{}:{}'.format(self.docker_image_name, self.docker_image_tag)
        # Case of a modified file with version >= 5.0.0
        if self.is_modified_file and server_version >= '5.0.0':
            # If docker image name are different and if the docker image isn't the default one
            if self.docker_image_latest_tag != self.docker_image_tag and full_image != 'demisto/python:1.3-alpine':
                self.is_latest_tag = False
        # Case of an added file
        elif not self.is_modified_file:
            if self.docker_image_latest_tag != self.docker_image_tag:
                self.is_latest_tag = False

    if not self.is_latest_tag:
        print_error('The docker image tag is not the latest, please update it.')
    return self.is_latest_tag
def run_test_logic(c, failed_playbooks, integrations, playbook_id, succeed_playbooks, test_message, test_options,
                   slack, circle_ci, build_number, server_url, build_name, is_mock_run=False):
    """Run one test playbook, record it as passed/failed, and notify on failure.

    Returns:
        bool. True when the playbook completed or was skipped for version support.
    """
    status, inc_id = test_integration(c, integrations, playbook_id, test_options, is_mock_run)
    if status == PB_Status.COMPLETED:
        print_color('PASS: {} succeed'.format(test_message), LOG_COLORS.GREEN)
        succeed_playbooks.append(playbook_id)

    elif status == PB_Status.NOT_SUPPORTED_VERSION:
        print('PASS: {} skipped - not supported version'.format(test_message))
        succeed_playbooks.append(playbook_id)

    else:
        print_error('Failed: {} failed'.format(test_message))
        # annotate the id so failures outside mock runs are distinguishable
        failure_label = playbook_id if is_mock_run else playbook_id + " (Mock Disabled)"
        failed_playbooks.append(failure_label)
        notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)

    return status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
def __enable_integrations_instances(client, module_instances):
    """Enable every given integration instance through the settings endpoint.

    Args:
        client: Configured demisto_client instance.
        module_instances (list): Configured instances as returned by the server.
    """
    # tested with POSTMAN, this is the minimum required fields for the request.
    required_keys = ('id', 'brand', 'name', 'data', 'isIntegrationScript')
    for configured_instance in module_instances:
        payload = {key: configured_instance[key] for key in required_keys}
        payload['enable'] = "true"
        payload['version'] = -1

        try:
            res = demisto_client.generic_request_func(self=client, method='PUT',
                                                      path='/settings/integration', body=payload)
        except ApiException as conn_err:
            print_error('Failed to enable integration instance, error trying to communicate with demisto '
                        'server: {} '.format(conn_err))
            # BUG FIX: the original fell through to the status check below with
            # `res` unbound after an ApiException, raising a NameError; skip to
            # the next instance instead.
            continue

        if res[1] != 200:
            print_error('Enabling instance failed with status code ' + str(res[1]) + '\n' + pformat(res))
def is_id_not_modified(self, change_string=None):
    """Check whether the file's ID was left untouched.

    Args:
        change_string (string): git-diff output for the file; computed on demand
            when not supplied.

    Returns:
        bool. True when the file's ID was not modified, False otherwise.
    """
    if self.is_renamed:
        print_warning("ID might have changed, please make sure to check you have the correct one.")
        return True

    diff_text = change_string or run_command("git diff HEAD {}".format(self.file_path))

    # A diff line that adds or removes an `id:` key means the ID changed.
    if re.search("[+-]( )?id: .*", diff_text):
        print_error("You've changed the ID of the file {0} please undo.".format(self.file_path))
        self._is_valid = False

    return self._is_valid
def is_changed_command_name_or_arg(self):
    """Check if a command name or argument has been changed.

    Returns:
        bool. True when a backwards-compatibility-breaking rename was detected.
    """
    new_commands = self._get_command_to_args(self.current_integration)
    old_commands = self._get_command_to_args(self.old_integration)

    for command_name, old_args in old_commands.items():
        # Every old command must still exist and its old args must all be
        # present in the new definition.
        still_compatible = (command_name in new_commands
                            and self.is_subset_dictionary(new_commands[command_name], old_args))
        if not still_compatible:
            print_error(
                "Possible backwards compatibility break, You've changed the name of a command or its arg in"
                " the file {0} please undo, the command was:\n{1}".format(self.file_path, command_name))
            self._is_valid = False
            return True

    return False
def get_modified_files(files_string):
    """Get lists of the modified files in your branch according to the files string.

    Args:
        files_string (string): String that was calculated by git using `git diff` command.

    Returns:
        (modified_files_list, added_files_list, deleted_files). Tuple of sets.
    """
    all_files = files_string.split('\n')
    deleted_files = set([])
    added_files_list = set([])
    modified_files_list = set([])
    for f in all_files:
        file_data = f.split()
        if not file_data:
            continue

        file_status = file_data[0]
        file_path = file_data[1]

        if checked_type(file_path, CODE_FILES_REGEX):
            # Code files are validated through their sibling yml file.
            dir_path = os.path.dirname(file_path)
            file_path = glob.glob(dir_path + "/*.yml")[0]
        elif file_path.endswith('.js') or file_path.endswith('.py'):
            continue

        if file_status.lower() == 'm' and checked_type(file_path) and not file_path.startswith('.'):
            modified_files_list.add(file_path)
        elif file_status.lower() == 'a' and checked_type(file_path) and not file_path.startswith('.'):
            added_files_list.add(file_path)
        elif file_status.lower() == 'd' and checked_type(file_path) and not file_path.startswith('.'):
            deleted_files.add(file_path)
        elif file_status.lower().startswith('r') and checked_type(file_path):
            # Renames report both the old and the new path.
            modified_files_list.add((file_data[1], file_data[2]))
        elif file_status.lower() not in KNOWN_FILE_STATUSES:
            # BUGFIX: message previously read "an unknown known one".
            print_error(file_path + " file status is an unknown one, "
                                    "please check. File status was: " + file_status)

    return modified_files_list, added_files_list, deleted_files
def __disable_integrations_instances(client, module_instances):
    """Disable every integration instance in ``module_instances`` on the server.

    Args:
        client: Demisto client used to issue the requests.
        module_instances (list[dict]): instance configurations as returned by the server.
    """
    for configured_instance in module_instances:
        # tested with POSTMAN, this is the minimum required fields for the request.
        module_instance = {
            key: configured_instance[key] for key in [
                'id', 'brand', 'name', 'data', 'isIntegrationScript', ]
        }
        module_instance['enable'] = "false"
        module_instance['version'] = -1

        try:
            res = client.req('PUT', '/settings/integration', module_instance)
        except requests.exceptions.RequestException as conn_err:
            print_error(
                'Failed to disable integration instance, error trying to communicate with demisto server: {} '
                .format(conn_err))
            # BUGFIX: `res` is unbound when the request raises - the original code
            # fell through to the status check and crashed with NameError.
            continue

        if res.status_code != 200:
            print_error('disable instance failed with status code ' + str(res.status_code))
            print_error(pformat(res.json()))
def is_valid_in_id_set(self, file_path, obj_data, obj_set):
    """Check if the file is represented correctly in the id_set

    Args:
        file_path (string): Path to the file.
        obj_data (dict): Dictionary that holds the extracted details from the given file.
        obj_set (set): The set in which the file should be located at.

    Returns:
        bool. Whether the file is represented correctly in the id_set or not.
    """
    is_found = False
    # BUGFIX: dict.keys() returns a non-indexable view on Python 3 - wrap in
    # list() so this works on both Python 2 and 3.
    file_id = list(obj_data.keys())[0]

    for checked_instance in obj_set:
        checked_instance_id = list(checked_instance.keys())[0]
        checked_instance_data = checked_instance[checked_instance_id]
        checked_instance_toversion = checked_instance_data.get('toversion', '99.99.99')
        checked_instance_fromversion = checked_instance_data.get('fromversion', '0.0.0')
        obj_to_version = obj_data[file_id].get('toversion', '99.99.99')
        obj_from_version = obj_data[file_id].get('fromversion', '0.0.0')

        # Match on id plus the exact version range - the id_set may hold several
        # entries with the same id for different server-version ranges.
        if checked_instance_id == file_id and checked_instance_toversion == obj_to_version and \
                checked_instance_fromversion == obj_from_version:
            is_found = True
            if checked_instance_data != obj_data[file_id]:
                print_error("You have failed to update id_set.json with the data of {} "
                            "please run `python Tests/scripts/update_id_set.py`".format(file_path))
                return False

    if not is_found:
        print_error("You have failed to update id_set.json with the data of {} "
                    "please run `python Tests/scripts/update_id_set.py`".format(file_path))

    return is_found
def __test_integration_instance(client, module_instance):
    """Press the "Test" button for a configured instance and report the outcome.

    Returns:
        bool. True when the instance test succeeded, False on any failure.
    """
    try:
        response_data, response_code, _ = demisto_client.generic_request_func(
            self=client, method='POST', path='/settings/integration/test',
            body=module_instance)
    except ApiException as conn_err:
        print_error(
            'Failed to test integration instance, error trying to communicate with demisto '
            'server: {} '.format(conn_err))
        return False

    if int(response_code) != 200:
        print_error('Integration-instance test ("Test" button) failed.\nBad status code: ' + str(response_code))
        return False

    # The server answers with a python-literal payload rather than strict JSON.
    result_object = ast.literal_eval(response_data)
    if not result_object['success']:
        print_error('Test integration failed.\n Failure message: ' + result_object['message'])
        return False

    return True
def main():
    """Collect server logs from each test machine, then destroy its AWS instance.

    argv[1] is the circle artifacts directory, argv[2] the env-results json path.
    Machines whose build failed (marker file exists) are kept alive for debugging.
    """
    circle_aritfact = sys.argv[1]
    envfile = sys.argv[2]
    with open(envfile, 'r') as json_file:
        env_results = json.load(json_file)

    for env in env_results:
        role = env["Role"].replace(' ', '')
        if os.path.isfile("./Tests/is_build_failed_{}.txt".format(role)):
            print_warning("Tests failed on {} ,keeping instance alive".format(env["Role"]))
            continue

        ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
                     '"sudo chmod -R 755 /var/log/demisto"'
        scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
                     '{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'

        # Make the server log readable, then copy it into the artifacts dir.
        try:
            subprocess.check_output(ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
        except subprocess.CalledProcessError as exc:
            print(exc.output)

        try:
            subprocess.check_output(
                scp_string.format(env["SSHuser"], env["InstanceDNS"],
                                  "{}/server_{}.log".format(circle_aritfact, role)),
                shell=True)
        except subprocess.CalledProcessError as exc:
            print(exc.output)

        rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
        if aws_functions.isError(rminstance):
            print_error(rminstance)
def __create_incident_with_playbook(client, name, playbook_id):
    """Create an incident that runs the given playbook and wait until it is searchable.

    Args:
        client: Demisto client used for the REST calls.
        name (string): name for the new incident.
        playbook_id (string): ID of the playbook to attach to the incident.

    Returns:
        (incident, inc_id) on success, (False, -1) on failure.

    NOTE(review): this function is defined twice in this module with identical
    logic; the later definition shadows this one - consider removing one copy.
    """
    # create incident
    kwargs = {'createInvestigation': True, 'playbookId': playbook_id}
    response_json = {}
    try:
        r = client.CreateIncident(name, None, None, None, None, None, None, **kwargs)
        response_json = r.json()
    except RuntimeError as err:
        print_error(str(err))

    # 'incCreateErr' is a sentinel marking a missing id in the server response.
    inc_id = response_json.get('id', 'incCreateErr')
    if inc_id == 'incCreateErr':
        print_error(INC_CREATION_ERR)
        return False, -1

    # get incident
    incidents = client.SearchIncidents(0, 50, 'id:' + inc_id)

    # poll the incidents queue for a max time of 25 seconds
    timeout = time.time() + 25
    while incidents['total'] != 1:
        incidents = client.SearchIncidents(0, 50, 'id:' + inc_id)
        if time.time() > timeout:
            print_error('Got timeout for searching incident with id {}, '
                        'got {} incidents in the search'.format(inc_id, incidents['total']))
            return False, -1

        time.sleep(1)

    return incidents['data'][0], inc_id
def __create_incident_with_playbook(client, name, playbook_id):
    """Create an incident attached to ``playbook_id`` and wait until it is searchable.

    Returns:
        (incident, inc_id) on success, (False, -1) on failure.
    """
    response_json = {}
    try:
        response = client.CreateIncident(name, None, None, None, None, None, None,
                                         createInvestigation=True, playbookId=playbook_id)
        response_json = response.json()
    except RuntimeError as err:
        print_error(str(err))

    # 'incCreateErr' is a sentinel marking a missing id in the server response.
    inc_id = response_json.get('id', 'incCreateErr')
    if inc_id == 'incCreateErr':
        print_error(INC_CREATION_ERR)
        return False, -1

    incidents = client.SearchIncidents(0, 50, 'id:' + inc_id)

    # Poll until the freshly-created incident shows up in search (max ~25s).
    deadline = time.time() + 25
    while incidents['total'] != 1:
        incidents = client.SearchIncidents(0, 50, 'id:' + inc_id)
        if time.time() > deadline:
            print_error('Got timeout for searching incident with id {}, '
                        'got {} incidents in the search'.format(
                            inc_id, incidents['total']))
            return False, -1
        time.sleep(1)

    return incidents['data'][0], inc_id
def get_secrets(branch_name, is_circle):
    """Scan the branch diff for potential secrets and report them.

    Args:
        branch_name (string): branch to diff against.
        is_circle (bool): True when running in CI against the public repository.

    Returns:
        dict. Mapping of file name to the secrets found in it (empty when clean).
    """
    secrets_found = {}

    # Skip the scan while a merge is in progress.
    if not run_command('git rev-parse -q --verify MERGE_HEAD'):
        secrets_file_paths = get_all_diff_text_files(branch_name, is_circle)
        secrets_found = search_potential_secrets(secrets_file_paths)

    if secrets_found:
        parts = ['Secrets were found in the following files:\n']
        for file_name in secrets_found:
            parts.append('\nFile Name: ' + file_name)
            parts.append(json.dumps(secrets_found[file_name], indent=4))

        if is_circle:
            parts.append('The secrets were exposed in public repository,'
                         ' remove the files asap and report it.\n')
        else:
            parts.append('Remove or whitelist secrets in order to proceed, then re-commit\n')

        parts.append('For more information about whitelisting please visit: '
                     'https://github.com/demisto/internal-content/tree/master/documentation/secrets')
        print_error(''.join(parts))

    return secrets_found
def get_test_list(files_string, branch_name):
    """Create a test list that should run"""
    modified_files, modified_tests_list, all_tests, is_conf_json, sample_tests = \
        get_modified_files(files_string)

    tests = set()
    if modified_files:
        tests = find_tests_for_modified_files(modified_files)

    # Directly-modified test playbooks always run (set.add is idempotent).
    for file_path in modified_tests_list:
        tests.add(collect_ids(file_path))

    if is_conf_json:
        tests |= get_test_from_conf(branch_name)

    if all_tests:
        print_warning('Running all tests due to: {}'.format(','.join(all_tests)))
        tests.add("Run all tests")

    if sample_tests:  # Choosing 3 random tests for infrastructure testing
        print_warning('Running sample tests due to: {}'.format(','.join(sample_tests)))
        test_ids = get_test_ids(check_nightly_status=True)
        for _ in range(3):
            tests.add(random.choice(test_ids))

    if not tests:
        if modified_files or modified_tests_list or all_tests:
            print_error("There are no tests that check the changes you've done, please make sure you write one")
            sys.exit(1)
        else:
            print_warning("Running Sanity check only")
            tests.add('DocumentationTest')  # test with integration configured
            tests.add('TestCommonPython')  # test with no integration configured

    return tests
def __print_investigation_error(client, playbook_id, investigation_id):
    """Fetch the investigation's entries and print every error entry of the failed playbook."""
    res = client.req('POST', '/investigation/' + urllib.quote(investigation_id), {})
    if res.status_code != 200:
        return

    print_error('Playbook ' + playbook_id + ' has failed:')
    for entry in res.json()['entries']:
        if entry['type'] != ENTRY_TYPE_ERROR:
            continue
        # An error entry may carry the command that produced it.
        if entry['parentContent']:
            print_error('\t- Command: ' + entry['parentContent'].encode('utf-8'))
        print_error('\t- Body: ' + entry['contents'].encode('utf-8'))
def test_integration(client, integrations, playbook_id, options=None, is_mock_run=False):
    """Configure the given integrations, run the test playbook and poll it to completion.

    Args:
        client: Demisto client used for all REST calls.
        integrations (list[dict]): integration descriptors with 'name', 'params', 'byoi' keys.
        playbook_id (string): ID of the test playbook to run.
        options (dict): optional settings; 'timeout' overrides DEFAULT_TIMEOUT.
        is_mock_run (bool): when True, route the instances through the mock proxy.

    Returns:
        (playbook_state, inc_id) - the final PB_Status of the playbook and the
        incident id, or (False, -1) when setup failed.
    """
    options = options if options is not None else {}
    # create integrations instances
    module_instances = []
    for integration in integrations:
        integration_name = integration.get('name', None)
        integration_params = integration.get('params', None)
        is_byoi = integration.get('byoi', True)

        if is_mock_run:
            configure_proxy_unsecure(integration_params)

        module_instance = __create_integration_instance(client, integration_name, integration_params, is_byoi)
        if module_instance is None:
            # Tear down any instances already created before bailing out.
            print_error('Failed to create instance')
            __delete_integrations_instances(client, module_instances)
            return False, -1

        module_instances.append(module_instance)
        print('Create integration %s succeed' % (integration_name, ))

    # create incident with playbook
    incident, inc_id = __create_incident_with_playbook(client, 'inc_%s' % (playbook_id, ), playbook_id)
    if not incident:
        return False, -1

    investigation_id = incident['investigationId']
    if investigation_id is None or len(investigation_id) == 0:
        # NOTE(review): `incident` is a dict here - this concatenation would raise
        # a TypeError; consider str(incident).
        print_error('Failed to get investigation id of incident:' + incident)
        return False, -1

    timeout_amount = options['timeout'] if 'timeout' in options else DEFAULT_TIMEOUT
    timeout = time.time() + timeout_amount

    i = 1
    # wait for playbook to finish run
    while True:
        # give playbook time to run
        time.sleep(1)

        # fetch status
        playbook_state = __get_investigation_playbook_state(client, investigation_id)

        if playbook_state == PB_Status.COMPLETED or playbook_state == PB_Status.NOT_SUPPORTED_VERSION:
            break
        if playbook_state == PB_Status.FAILED:
            print_error(playbook_id + ' failed with error/s')
            __print_investigation_error(client, playbook_id, investigation_id)
            break
        if time.time() > timeout:
            print_error(playbook_id + ' failed on timeout')
            break

        # Periodic progress log so long runs are visibly alive.
        if i % DEFAULT_INTERVAL == 0:
            print('loop no. {}, playbook state is {}'.format(i / DEFAULT_INTERVAL, playbook_state))
        i = i + 1

    __disable_integrations_instances(client, module_instances)

    test_pass = playbook_state == PB_Status.COMPLETED or playbook_state == PB_Status.NOT_SUPPORTED_VERSION
    if test_pass:
        # delete incident
        __delete_incident(client, incident)

        # delete integration instance
        __delete_integrations_instances(client, module_instances)

    return playbook_state, inc_id
def __create_integration_instance(client, integration_name, integration_params, is_byoi):
    """Create and test a new integration instance on the server.

    Args:
        client: Demisto client used for the REST calls.
        integration_name (string): brand name of the integration to configure.
        integration_params (dict): parameter values keyed by display name or name.
        is_byoi (bool): whether the integration is a "bring your own integration" script.

    Returns:
        dict. The configured module instance, or None on any failure.
    """
    print('Configuring instance for {}'.format(integration_name))
    # get configuration config (used for later rest api
    configuration = __get_integration_config(client, integration_name)
    if not configuration:
        return None

    module_configuration = configuration['configuration']
    if not module_configuration:
        module_configuration = []

    # Unique name so repeated test runs do not collide.
    instance_name = (integration_name + '_test' + str(uuid.uuid4())).replace(' ', '_')
    # define module instance
    module_instance = {
        'brand': configuration['name'],
        'category': configuration['category'],
        'configuration': configuration,
        'data': [],
        'enabled': "true",
        'engine': '',
        'id': '',
        'isIntegrationScript': is_byoi,
        'name': instance_name,
        'passwordProtected': False,
        'version': 0
    }

    # set module params
    for param_conf in module_configuration:
        # Params in the conf may be keyed by display name or by internal name.
        if param_conf['display'] in integration_params or param_conf['name'] in integration_params:
            # param defined in conf
            key = param_conf['display'] if param_conf['display'] in integration_params else param_conf['name']
            if key == 'credentials':
                credentials = integration_params[key]
                param_value = {
                    'credential': '',
                    'identifier': credentials['identifier'],
                    'password': credentials['password'],
                    'passwordChanged': False
                }
            else:
                param_value = integration_params[key]

            param_conf['value'] = param_value
            param_conf['hasvalue'] = True
        elif param_conf['defaultValue']:
            # param is required - take default value
            param_conf['value'] = param_conf['defaultValue']
        module_instance['data'].append(param_conf)

    res = client.req('PUT', '/settings/integration', module_instance)

    if res.status_code != 200:
        print_error('create instance failed with status code ' + str(res.status_code))
        print_error(pformat(res.json()))
        return None

    integration_config = res.json()
    module_instance['id'] = integration_config['id']

    # test integration: run the instance's "Test" button before handing it back.
    test_succeed = __test_integration_instance(client, module_instance)
    if not test_succeed:
        __disable_integrations_instances(client, [module_instance])
        return None

    return module_instance
def collect_tests(script_ids, playbook_ids, integration_ids, catched_scripts, catched_playbooks, tests_set):
    """Collect tests for the affected script_ids, playbook_ids and integration_ids.

    :param script_ids: The ids of the affected scripts in your change set.
    :param playbook_ids: The ids of the affected playbooks in your change set.
    :param integration_ids: The ids of the affected integrations in your change set.
    :param catched_scripts: The names of the scripts we already identified a test for.
    :param catched_playbooks: The names of the playbooks we already identified a test for.
    :param tests_set: The names of the tests we already identified.

    :return: (test_ids, missing_ids, caught_missing_test) - all the names of possible tests,
        the ids we didn't match a test for, and whether a used playbook is missing from conf.json.
    """
    caught_missing_test = False
    catched_intergrations = set([])
    test_ids = get_test_ids()

    with open("./Tests/id_set.json", 'r') as conf_file:
        id_set = json.load(conf_file)

    integration_set = id_set['integrations']
    test_playbooks_set = id_set['TestPlaybooks']
    integration_to_command = get_integration_commands(integration_ids, integration_set)

    for test_playbook in test_playbooks_set:
        detected_usage = False
        # BUGFIX: dict.keys()/.values() are non-indexable views on Python 3 -
        # wrap in list() so this works on both Python 2 and 3.
        test_playbook_id = list(test_playbook.keys())[0]
        test_playbook_data = list(test_playbook.values())[0]
        test_playbook_name = test_playbook_data.get('name')

        for script in test_playbook_data.get('implementing_scripts', []):
            if script in script_ids:
                detected_usage = True
                tests_set.add(test_playbook_id)
                catched_scripts.add(script)

        for playbook in test_playbook_data.get('implementing_playbooks', []):
            if playbook in playbook_ids:
                detected_usage = True
                tests_set.add(test_playbook_id)
                catched_playbooks.add(playbook)

        if integration_to_command:
            command_to_integration = test_playbook_data.get('command_to_integration', {})
            for command in test_playbook_data.get('command_to_integration', {}).keys():
                for integration_id, integration_commands in integration_to_command.items():
                    if command in integration_commands:
                        # Accept either an unmapped command or one mapped to this integration.
                        if not command_to_integration.get(command) or \
                                command_to_integration.get(command) == integration_id:
                            detected_usage = True
                            tests_set.add(test_playbook_id)
                            catched_intergrations.add(integration_id)

        if detected_usage and test_playbook_id not in test_ids:
            caught_missing_test = True
            print_error("The playbook {} does not appear in the conf.json file, which means no test with it will run."
                        "please update the conf.json file accordingly".format(test_playbook_name))

    missing_ids = update_missing_sets(catched_intergrations, catched_playbooks, catched_scripts,
                                      integration_ids, playbook_ids, script_ids)

    return test_ids, missing_ids, caught_missing_test