def main():
    """Fetch the IPs of the AMI instances listed in instance_ids.txt and start the content servers on them.

    Side effects:
        - Reads ./Tests/instance_ids.txt ("name:instance-id" per line).
        - Runs get_instance_ip.sh / copy_content_data.sh per instance (one thread each).
        - Overwrites ./Tests/instance_ips.txt with "name:ip" per line.
    """
    instance_ips = []
    with open('./Tests/instance_ids.txt', 'r') as instance_file:
        ami_instances = instance_file.readlines()
        ami_instances = [line.strip('\n').split(":") for line in ami_instances
                         if line.strip('\n').split(":") != ['']]

    id_to_ip = {}
    for ami_instance_name, ami_instance_id in ami_instances:
        # BUGFIX: this was a Python 2 `print` statement, inconsistent with the
        # print() calls used in the rest of this function (and a syntax error on Python 3).
        print("Validating ami instance: {}".format(ami_instance_name))
        run_command("./Tests/scripts/get_instance_ip.sh {}".format(ami_instance_id))

        # get_instance_ip.sh script is writing the ip to instance_ips.txt because we couldn't get the ip
        # from the output of the aws script
        with open('./Tests/instance_ips.txt', 'r') as instance_file:
            instance_ip = instance_file.read()
            instance_ip = instance_ip.strip()

        print("The IP of the instance is {}\n".format(instance_ip))
        id_to_ip[ami_instance_id] = instance_ip

    print("Waiting 90 Seconds for SSH to start\n")
    sleep(90)

    threads_list = []
    for ami_instance_name, ami_instance_id in ami_instances:
        t = Thread(target=run_command,
                   args=("./Tests/scripts/copy_content_data.sh {}".format(id_to_ip[ami_instance_id]), ),
                   kwargs={'is_silenced': False})
        threads_list.append(t)  # copy_content_data.sh also starts the server
        instance_ips.append("{}:{}".format(ami_instance_name, id_to_ip[ami_instance_id]))

    run_threads_list(threads_list)

    with open('./Tests/instance_ips.txt', 'w') as instance_file:
        instance_file.write('\n'.join(instance_ips))
def update_id_set():
    """Load Tests/id_set.json for the files changed on the current branch.

    Recovers from a merge-conflict-corrupted id_set.json by restoring the
    master version before reloading.
    """
    branches = run_command("git branch")
    # raw string: `\*` in a plain string is an invalid escape sequence on Python 3
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    print("Getting added files")
    files_string = run_command("git diff --name-status HEAD")
    second_files_string = run_command("git diff --name-status origin/master...{}".format(branch_name))
    added_files, modified_files, added_scripts, modified_scripts = \
        get_changed_files(files_string + '\n' + second_files_string)

    if added_files or modified_files or added_scripts or modified_scripts:
        print("Updating id_set.json")

        with open('./Tests/id_set.json', 'r') as id_set_file:
            try:
                ids_dict = json.load(id_set_file, object_pairs_hook=OrderedDict)
            # BUGFIX: `except ValueError, ex` and `ex.message` are Python 2 only;
            # use the `as ex` / str(ex) form used by the sibling implementation.
            except ValueError as ex:
                if "Expecting property name" in str(ex):
                    # if we got this error it means we have corrupted id_set.json
                    # usually it will happen if we merged from master and we had a conflict in id_set.json
                    # so we checkout the id_set.json to be exact as in master and then run update_id_set
                    run_command("git checkout origin/master Tests/id_set.json")
                    with open('./Tests/id_set.json', 'r') as id_set_file_from_master:
                        ids_dict = json.load(id_set_file_from_master, object_pairs_hook=OrderedDict)
                else:
                    # BUGFIX: bare `raise` preserves the original traceback (was `raise ex`)
                    raise

        test_playbook_set = ids_dict['TestPlaybooks']
        integration_set = ids_dict['integrations']
        playbook_set = ids_dict['playbooks']
        script_set = ids_dict['scripts']
def create_test_file(is_nightly):
    """Create a file containing all the tests we need to run for the CI"""
    collected = ''
    if not is_nightly:
        branch = re.search(r"\* (.*)", run_command("git branch")).group(1)
        print("Getting changed files from the branch: {0}".format(branch))
        if branch != 'master':
            diff_output = run_command("git diff --name-status origin/master...{0}".format(branch))
        else:
            # on master, diff the two most recent commits against each other
            raw_commits = run_command("git log -n 2 --pretty='%H'").replace("'", "")
            newest, previous = raw_commits.split()
            diff_output = run_command("git diff --name-status {}...{}".format(previous, newest))

        collected = '\n'.join(get_test_list(diff_output, branch))

    if collected:
        print('Collected the following tests:\n{0}\n'.format(collected))
    else:
        print('No filter configured, running all tests')

    print("Creating filter_file.txt")
    with open("./Tests/filter_file.txt", "w") as filter_file:
        filter_file.write(collected)
def create_instance(ami_name):
    """Launch an instance from the given AMI and return the new instance id.

    Also appends the image info line for this AMI to ./Tests/images_data.txt.
    """
    readable_name = AMI_NAME_TO_READABLE[ami_name]
    print("Creating instance from the AMI image for {}".format(readable_name))
    run_command("./Tests/scripts/create_instance.sh instance.json {}".format(ami_name))  # noqa

    # create_instance.sh writes the new instance id to this file
    with open('./Tests/instance_ids.txt', 'r') as instance_file:
        instance_id = instance_file.read()

    with open('image_id.txt', 'r') as image_id_file:
        image_data = image_id_file.read()
    print('Image data is {}'.format(image_data))

    with open("./Tests/images_data.txt", "a") as image_data_file:
        image_data_file.write(
            '{name} Image info is: {data}\n'.format(name=readable_name, data=image_data))

    return instance_id
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non critical loggings
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — `\*` in a plain string literal is an invalid escape
    # sequence (DeprecationWarning on Python 3).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    options = parser.parse_args()
    is_circle = options.circle
    is_backward_check = options.backwardComp

    # silence pykwalify's validation logging (see docstring)
    logging.basicConfig(level=logging.CRITICAL)

    print_color("Starting validating files structure", LOG_COLORS.GREEN)
    files_validator = FilesValidator(is_circle)
    if not files_validator.is_valid_structure(branch_name, is_backward_check=is_backward_check):
        sys.exit(1)
    print_color("Finished validating files structure", LOG_COLORS.GREEN)
    sys.exit(0)
def slack_notifier(slack_token, secret_conf_path, server, user, password, build_url):
    """Post the instances-configuration summary to Slack (master branch only).

    Sends the same attachments to both #devops-events and #content-lab-tests.
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — `\*` in a plain string literal is an invalid escape
    # sequence (DeprecationWarning on Python 3).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about instances", LOG_COLORS.GREEN)
        attachments, integrations_counter = get_attachments(secret_conf_path, server, user, password, build_url)

        sc = SlackClient(slack_token)
        sc.api_call(
            "chat.postMessage",
            channel="devops-events",
            username="******",
            as_user="******",
            attachments=attachments,
            text="You have {0} instances configurations".format(integrations_counter)
        )
        sc.api_call(
            "chat.postMessage",
            channel="content-lab-tests",
            username="******",
            as_user="******",
            attachments=attachments,
            text="You have {0} instances configurations".format(integrations_counter)
        )
def __init__(self, file_path, check_git=True):
    """Load the yml file to be validated.

    Args:
        file_path (str): path of the yml file.
        check_git (bool): when True, also capture the file's git diff against
            origin/master into self.change_string.
    """
    self._is_valid = True
    self.file_path = file_path

    if check_git:
        self.change_string = run_command("git diff origin/master {0}".format(self.file_path))

    with open(file_path, 'r') as yml_file:
        self.yaml_data = yaml.safe_load(yml_file)
def get_all_diff_text_files(branch_name, is_circle):
    """
    Get all new/modified text files that need to be searched for secrets
    :param branch_name: current branch being worked on
    :param is_circle: boolean to check if being ran from circle
    :return: list: list of text files
    """
    # on circle we diff against origin/master; locally we diff uncommitted work
    if is_circle:
        diff_command = "git diff --name-status origin/master...{}".format(branch_name)
    else:
        diff_command = "git diff --name-status --no-merges HEAD"
    return get_diff_text_files(run_command(diff_command))
def get_modified_and_added_files(self, branch_name, is_circle):
    """Get lists of the modified and added files in your branch according to the git diff output.

    Args:
        branch_name (string): The name of the branch we are working on.
        is_circle (bool): Whether we are running on circle or local env.

    Returns:
        (modified_files, added_files). Tuple of sets.
    """
    branch_diff = run_command("git diff --name-status origin/master...{}".format(branch_name))
    modified_files, added_files, _ = self.get_modified_files(branch_diff)

    if not is_circle:
        head_diff = run_command("git diff --name-status --no-merges HEAD")
        uncommitted_modified, uncommitted_added, uncommitted_deleted = \
            self.get_modified_files(head_diff)

        master_diff = run_command("git diff --name-status origin/master")
        modified_from_master, added_from_master, _ = self.get_modified_files(master_diff)

        # files that differ from master AND are still uncommitted count as changed
        modified_files |= set(modified_from_master) & set(uncommitted_modified)
        added_files |= set(added_from_master) & set(uncommitted_added)

        # drop files that were deleted (or demoted to modified) locally
        modified_files = modified_files - set(uncommitted_deleted)
        added_files = added_files - set(uncommitted_modified) - set(uncommitted_deleted)

        # an added file only counts if it is still present in the uncommitted adds
        added_files &= set(uncommitted_added)

    return modified_files, added_files
def slack_notifier(build_url, slack_token):
    """Post the nightly-build status to Slack (master branch only)."""
    branches = run_command("git branch")
    # BUGFIX: raw string — `\*` in a plain string literal is an invalid escape
    # sequence (DeprecationWarning on Python 3).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about nightly build", LOG_COLORS.GREEN)
        print("Extracting build status")
        content_team_attachments, content_attachments = get_attachments(build_url)

        print("Sending Slack messages to #content and #content-team")
        # NOTE(review): only #content-team is actually messaged here although the
        # log line above mentions #content too — confirm whether a second
        # api_call with content_attachments is intended.
        sc = SlackClient(slack_token)
        sc.api_call(
            "chat.postMessage",
            channel="content-team",
            username="******",
            as_user="******",
            attachments=content_team_attachments
        )
def get_secrets(branch_name, is_circle):
    """Scan the branch's changed text files for potential secrets.

    Prints a report via print_error when secrets are found.
    Returns a dict mapping file name -> secrets found (empty when clean).
    """
    secrets_found = {}
    # skip the scan while a merge is in progress (MERGE_HEAD exists)
    if not run_command('git rev-parse -q --verify MERGE_HEAD'):
        secrets_file_paths = get_all_diff_text_files(branch_name, is_circle)
        secrets_found = search_potential_secrets(secrets_file_paths)
        if secrets_found:
            report = ['Secrets were found in the following files:\n']
            for file_name in secrets_found:
                report.append('\nFile Name: ' + file_name)
                report.append(json.dumps(secrets_found[file_name], indent=4))
            if not is_circle:
                report.append('Remove or whitelist secrets in order to proceed, then re-commit\n')
            else:
                report.append('The secrets were exposed in public repository,'
                              ' remove the files asap and report it.\n')
            report.append('For more information about whitelisting please visit: '
                          'https://github.com/demisto/internal-content/tree/master/documentation/secrets')
            print_error(''.join(report))
    return secrets_found
def get_test_from_conf(branch_name):
    """Return the playbook ids affected by this branch's changes to Tests/conf.json."""
    tests = set([])
    changed = set([])
    change_string = run_command("git diff origin/master...{} Tests/conf.json".format(branch_name))

    # collect every json key that was added or removed in the diff
    for pattern in (r'(\+[ ]+")(.*)(":)', r'(-[ ]+")(.*)(":)'):
        for group in re.findall(pattern, change_string):
            changed.add(group[1])

    with open("./Tests/conf.json", 'r') as conf_file:
        conf = json.load(conf_file)

    for test_entry in conf['tests']:
        playbook_id = test_entry['playbookID']
        if playbook_id in changed:
            tests.add(playbook_id)
            continue
        integrations_conf = test_entry.get('integrations', [])
        if not isinstance(integrations_conf, list):
            integrations_conf = [integrations_conf]
        if any(integration in changed for integration in integrations_conf):
            tests.add(playbook_id)

    if not tests:
        tests.add('changed skip section')

    return tests
def get_test_from_conf(branch_name):
    """Figure out which tests must run because of changes to Tests/conf.json on this branch."""
    diff = run_command("git diff origin/master...{} Tests/conf.json".format(branch_name))

    # keys added or removed in the conf.json diff
    touched_keys = set()
    added = re.findall(r'(\+[ ]+")(.*)(":)', diff)
    removed = re.findall(r'(-[ ]+")(.*)(":)', diff)
    for _, key, _ in added + removed:
        touched_keys.add(key)

    with open("./Tests/conf.json", 'r') as conf_file:
        conf_tests = json.load(conf_file)['tests']

    selected = set()
    for entry in conf_tests:
        playbook_id = entry['playbookID']
        if playbook_id in touched_keys:
            selected.add(playbook_id)
            continue
        integrations = entry.get('integrations', [])
        if not isinstance(integrations, list):
            integrations = [integrations]
        for integration in integrations:
            if integration in touched_keys:
                selected.add(playbook_id)

    return selected if selected else {'changed skip section'}
def update_object_in_id_set(obj_id, obj_data, file_path, instances_set):
    """Update obj_id's entry in instances_set in place, matched by version range.

    Args:
        obj_id: id of the integration/script/playbook to update.
        obj_data: fresh data, a dict keyed by obj_id.
        file_path: path of the changed file (its git diff and from/to versions
            decide which existing entry to overwrite).
        instances_set: list of single-key dicts ({id: data}) from id_set.json.

    If no matching entry exists a new one is appended via add_new_object_to_id_set.
    """
    change_string = run_command("git diff HEAD {0}".format(file_path))
    # raw strings: `\+` in a plain string is an invalid escape sequence on Python 3
    is_added_from_version = bool(re.search(r'\+fromversion: .*', change_string))
    is_added_to_version = bool(re.search(r'\+toversion: .*', change_string))

    file_to_version = get_to_version(file_path)
    file_from_version = get_from_version(file_path)

    updated = False
    for instance in instances_set:
        # BUGFIX: dict.keys()[0] is Python 2 only — dict views are not
        # subscriptable on Python 3; each instance is a single-key dict.
        instance_id = next(iter(instance))
        integration_to_version = instance[instance_id].get('toversion', '99.99.99')
        integration_from_version = instance[instance_id].get('fromversion', '0.0.0')
        if obj_id == instance_id:
            # `A or (not A and B)` simplified to the equivalent `A or B`
            if is_added_from_version or file_from_version == integration_from_version:
                if is_added_to_version or file_to_version == integration_to_version:
                    instance[obj_id] = obj_data[obj_id]
                    updated = True
                    break

    if not updated:
        # no matching entry was found, so create a new one
        add_new_object_to_id_set(obj_id, obj_data, instances_set)
def get_secrets(branch_name, is_circle):
    """Search the changed text files for potential secrets; report and return them.

    Returns an empty dict when a merge is in progress or nothing was found.
    """
    # a truthy MERGE_HEAD means a merge is in progress — skip the scan
    if run_command('git rev-parse -q --verify MERGE_HEAD'):
        return {}

    candidate_files = get_all_diff_text_files(branch_name, is_circle)
    secrets_found = search_potential_secrets(candidate_files)
    if not secrets_found:
        return secrets_found

    message = 'Secrets were found in the following files:\n'
    for file_name in secrets_found:
        message += '\nFile Name: ' + file_name
        message += json.dumps(secrets_found[file_name], indent=4)
    if is_circle:
        message += ('The secrets were exposed in public repository,'
                    ' remove the files asap and report it.\n')
    else:
        message += 'Remove or whitelist secrets in order to proceed, then re-commit\n'
    message += ('For more information about whitelisting please visit: '
                'https://github.com/demisto/internal-content/tree/master/documentation/secrets')
    print_error(message)
    return secrets_found
def get_branch_name():
    """Return the name of the currently checked-out git branch."""
    output = run_command('git branch')
    # the active branch is the line prefixed with an asterisk
    return re.search(r'\* (.*)', output).group(1)
def update_id_set():
    """Update Tests/id_set.json with the files added/modified on the current branch.

    Diffs the working tree against HEAD and origin/master, patches the matching
    id_set sections, and writes the sorted result back to disk.
    """
    branches = run_command("git branch")
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    print("Getting added files")
    files_string = run_command("git diff --name-status HEAD")
    second_files_string = run_command("git diff --name-status origin/master...{}".format(branch_name))
    added_files, modified_files, added_scripts, modified_scripts = \
        get_changed_files(files_string + '\n' + second_files_string)

    if not (added_files or modified_files or added_scripts or modified_scripts):
        return

    print("Updating id_set.json")
    ids_dict = _load_id_set()

    test_playbook_set = ids_dict['TestPlaybooks']
    integration_set = ids_dict['integrations']
    playbook_set = ids_dict['playbooks']
    script_set = ids_dict['scripts']

    for file_path in added_files:
        _add_file_to_id_set(file_path, integration_set, script_set, playbook_set, test_playbook_set)

    for file_path in modified_files:
        _update_file_in_id_set(file_path, integration_set, script_set, playbook_set, test_playbook_set)

    for added_script_package in added_scripts:
        yml_path, code = get_script_package_data(added_script_package)
        add_new_object_to_id_set(get_script_or_integration_id(yml_path),
                                 get_script_data(yml_path, script_code=code), script_set)
        print("Adding {0} to id_set".format(get_script_or_integration_id(yml_path)))

    # BUGFIX: this loop iterated over added_scripts, so modified script
    # packages were never actually updated.
    for modified_script_package in modified_scripts:
        yml_path, code = get_script_package_data(modified_script_package)
        update_object_in_id_set(get_script_or_integration_id(yml_path),
                                get_script_data(yml_path, script_code=code), yml_path, script_set)
        print("updated {0} in id_set".format(get_script_or_integration_id(yml_path)))

    # BUGFIX: the dump used to run only when added_files/modified_files were
    # non-empty, silently discarding script-package changes made above; any
    # change that reached this point must be persisted.
    new_ids_dict = OrderedDict()
    # we sort each time the whole set in case someone manually changed something
    # it shouldn't take too much time
    new_ids_dict['scripts'] = sort(script_set)
    new_ids_dict['playbooks'] = sort(playbook_set)
    new_ids_dict['integrations'] = sort(integration_set)
    new_ids_dict['TestPlaybooks'] = sort(test_playbook_set)

    with open('./Tests/id_set.json', 'w') as id_set_file:
        json.dump(new_ids_dict, id_set_file, indent=4)

    print("Finished updating id_set.json")


def _load_id_set():
    """Load Tests/id_set.json, recovering from a merge-conflict-corrupted file."""
    with open('./Tests/id_set.json', 'r') as id_set_file:
        try:
            return json.load(id_set_file, object_pairs_hook=OrderedDict)
        except ValueError as ex:
            if "Expecting property name" not in str(ex):
                raise
            # corrupted id_set.json — usually a leftover merge conflict with
            # master: restore the master version and reload it
            run_command("git checkout origin/master Tests/id_set.json")
            with open('./Tests/id_set.json', 'r') as id_set_file_from_master:
                return json.load(id_set_file_from_master, object_pairs_hook=OrderedDict)


def _add_file_to_id_set(file_path, integration_set, script_set, playbook_set, test_playbook_set):
    """Add a newly added file's id to the matching id_set section(s)."""
    if re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE) or \
            re.match(INTEGRATION_YML_REGEX, file_path, re.IGNORECASE):
        add_new_object_to_id_set(get_script_or_integration_id(file_path),
                                 get_integration_data(file_path), integration_set)
        print("Adding {0} to id_set".format(get_script_or_integration_id(file_path)))

    if re.match(SCRIPT_REGEX, file_path, re.IGNORECASE):
        add_new_object_to_id_set(get_script_or_integration_id(file_path),
                                 get_script_data(file_path), script_set)
        print("Adding {0} to id_set".format(get_script_or_integration_id(file_path)))

    if re.match(PLAYBOOK_REGEX, file_path, re.IGNORECASE):
        add_new_object_to_id_set(collect_ids(file_path), get_playbook_data(file_path), playbook_set)
        print("Adding {0} to id_set".format(collect_ids(file_path)))

    if re.match(TEST_PLAYBOOK_REGEX, file_path, re.IGNORECASE):
        add_new_object_to_id_set(collect_ids(file_path), get_playbook_data(file_path), test_playbook_set)
        print("Adding {0} to id_set".format(collect_ids(file_path)))

    if re.match(TEST_SCRIPT_REGEX, file_path, re.IGNORECASE):
        add_new_object_to_id_set(get_script_or_integration_id(file_path),
                                 get_script_data(file_path), script_set)
        # BUGFIX: used to print collect_ids(file_path) although the script id was added
        print("Adding {0} to id_set".format(get_script_or_integration_id(file_path)))


def _update_file_in_id_set(file_path, integration_set, script_set, playbook_set, test_playbook_set):
    """Update a modified file's entry in the matching id_set section(s)."""
    # `obj_id` used throughout instead of shadowing the builtin `id`
    if re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE) or \
            re.match(INTEGRATION_YML_REGEX, file_path, re.IGNORECASE):
        obj_id = get_script_or_integration_id(file_path)
        update_object_in_id_set(obj_id, get_integration_data(file_path), file_path, integration_set)
        print("updated {0} in id_set".format(obj_id))

    if re.match(SCRIPT_REGEX, file_path, re.IGNORECASE) or \
            re.match(TEST_SCRIPT_REGEX, file_path, re.IGNORECASE):
        obj_id = get_script_or_integration_id(file_path)
        update_object_in_id_set(obj_id, get_script_data(file_path), file_path, script_set)
        print("updated {0} in id_set".format(obj_id))

    if re.match(PLAYBOOK_REGEX, file_path, re.IGNORECASE):
        obj_id = collect_ids(file_path)
        update_object_in_id_set(obj_id, get_playbook_data(file_path), file_path, playbook_set)
        print("updated {0} in id_set".format(obj_id))

    if re.match(TEST_PLAYBOOK_REGEX, file_path, re.IGNORECASE):
        obj_id = collect_ids(file_path)
        update_object_in_id_set(obj_id, get_playbook_data(file_path), file_path, test_playbook_set)
        print("updated {0} in id_set".format(obj_id))
def get_last_release_version():
    """Return the highest X.Y.Z release tag in the repository.

    Raises:
        IndexError: if no release-style tag exists.
    """
    from functools import cmp_to_key  # local import: only this function needs it

    tags = run_command('git tag').split('\n')
    release_tags = [tag for tag in tags if re.match(r'\d+\.\d+\.\d+', tag) is not None]

    # BUGFIX: list.sort(cmp=...) is Python 2 only — the cmp parameter was
    # removed in Python 3; wrap the comparator with functools.cmp_to_key.
    release_tags.sort(key=cmp_to_key(server_version_compare), reverse=True)
    return release_tags[0]
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non critical loggings
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — `\*` in a plain string literal is an invalid escape
    # sequence (DeprecationWarning on Python 3).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    parser.add_argument('-t', '--test-filter', type=str2bool, default=False, help='Check that tests are valid.')
    options = parser.parse_args()
    is_circle = options.circle
    is_backward_check = options.backwardComp

    # silence pykwalify's validation logging (see docstring)
    logging.basicConfig(level=logging.CRITICAL)

    print_color("Starting validating files structure", LOG_COLORS.GREEN)
    files_validator = FilesValidator(is_circle)
    if not files_validator.is_valid_structure(
            branch_name, is_backward_check=is_backward_check):
        sys.exit(1)

    if options.test_filter:
        try:
            print_color(
                "Updating idset. Be patient if this is the first time...",
                LOG_COLORS.YELLOW)
            subprocess.check_output(["./Tests/scripts/update_id_set.py"])
            print_color("Checking that we have tests for all content...",
                        LOG_COLORS.YELLOW)
            try:
                tests_out = subprocess.check_output(
                    ["./Tests/scripts/configure_tests.py", "-s", "true"],
                    stderr=subprocess.STDOUT)
                print(tests_out)
            except Exception:
                # first attempt failed — rebuild the id set to rule out a stale
                # id_set.json before re-running the check
                print_color(
                    "Recreating idset to be sure that configure tests failure is accurate."
                    " Be patient this can take 15-20 seconds ...",
                    LOG_COLORS.YELLOW)
                subprocess.check_output(
                    ["./Tests/scripts/update_id_set.py", "-r"])
                print_color(
                    "Checking that we have tests for all content again...",
                    LOG_COLORS.YELLOW)
                subprocess.check_call(
                    ["./Tests/scripts/configure_tests.py", "-s", "true"])
        except Exception as ex:
            print_color("Failed validating tests: {}".format(ex), LOG_COLORS.RED)
            sys.exit(1)

    print_color("Finished validating files structure", LOG_COLORS.GREEN)
    sys.exit(0)
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non critical loggings
    """
    branch_match = re.search(r'\* (.*)', run_command('git branch'))
    branch_name = branch_match.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    parser.add_argument('-t', '--test-filter', type=str2bool, default=False, help='Check that tests are valid.')
    parser.add_argument(
        '-p', '--prev-ver', help='Previous branch or SHA1 commit to run checks against.')
    options = parser.parse_args()

    is_forked = re.match(EXTERNAL_PR_REGEX, branch_name) is not None

    # keep pykwalify quiet (see docstring)
    logging.basicConfig(level=logging.CRITICAL)

    print_color('Starting validating files structure', LOG_COLORS.GREEN)
    files_validator = FilesValidator(options.circle, print_ignored_files=True)
    structure_ok = files_validator.is_valid_structure(
        branch_name, is_backward_check=options.backwardComp,
        prev_ver=options.prev_ver, is_forked=is_forked)
    if not structure_ok:
        sys.exit(1)

    if options.test_filter:
        try:
            print_warning('Updating idset. Be patient if this is the first time...')
            subprocess.check_output(['./Tests/scripts/update_id_set.py'])
            print_warning('Checking that we have tests for all content...')
            try:
                tests_out = subprocess.check_output(
                    ['./Tests/scripts/configure_tests.py', '-s', 'true'],
                    stderr=subprocess.STDOUT)
                print(tests_out)
            except Exception:
                # first pass failed: rebuild the id set before checking again
                print_warning(
                    'Recreating idset to be sure that configure tests failure is accurate.'
                    ' Be patient this can take 15-20 seconds ...')
                subprocess.check_output(['./Tests/scripts/update_id_set.py', '-r'])
                print_warning('Checking that we have tests for all content again...')
                subprocess.check_call(['./Tests/scripts/configure_tests.py', '-s', 'true'])
        except Exception as ex:
            print_error('Failed validating tests: {}'.format(ex))
            sys.exit(1)

    print_color('Finished validating files structure', LOG_COLORS.GREEN)
    sys.exit(0)