Ejemplo n.º 1
0
    def zip_pack(self):
        """Zip the pack folder, excluding unwanted directories.

        Returns:
            tuple: (task_status, zip_pack_path) where task_status (bool) is
                whether the operation succeeded and zip_pack_path (str) is the
                full path to the created pack zip (the file may not exist on
                failure).
        """
        zip_pack_path = f"{self._pack_path}.zip"
        task_status = False

        try:
            with ZipFile(zip_pack_path, 'w', ZIP_DEFLATED) as pack_zip:
                for root, dirs, files in os.walk(self._pack_path,
                                                 topdown=True):
                    # Prune excluded directories in-place so os.walk skips
                    # descending into them (requires topdown=True).
                    dirs[:] = [
                        d for d in dirs if d not in Pack.EXCLUDE_DIRECTORIES
                    ]

                    for f in files:
                        full_file_path = os.path.join(root, f)
                        # archive names are relative to the pack root
                        relative_file_path = os.path.relpath(
                            full_file_path, self._pack_path)
                        pack_zip.write(filename=full_file_path,
                                       arcname=relative_file_path)

            task_status = True
            print_color(f"Finished zipping {self._pack_name} pack.",
                        LOG_COLORS.GREEN)
        except Exception as e:
            print_error(
                f"Failed in zipping {self._pack_name} folder.\n Additional info: {e}"
            )

        # BUGFIX: returning from a `finally` block silently swallowed any
        # in-flight exception (including KeyboardInterrupt/SystemExit).
        # Returning here preserves the same (status, path) contract while
        # letting non-Exception errors propagate.
        return task_status, zip_pack_path
Ejemplo n.º 2
0
def slack_notifier(slack_token, secret_conf_path, server, user, password, build_url):
    """Post instance-configuration summaries to Slack when building master.

    Args:
        slack_token (str): Slack API token used to create the client.
        secret_conf_path (str): path to the secrets configuration file.
        server (str): server URL the attachments are built against.
        user (str): username used by get_attachments.
        password (str): password used by get_attachments.
        build_url (str): CI build URL embedded in the attachments.

    Only runs on the `master` branch; otherwise it is a no-op.
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — "\*" in a regular string is an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about instances", LOG_COLORS.GREEN)
        attachments, integrations_counter = get_attachments(secret_conf_path, server, user, password, build_url)

        sc = SlackClient(slack_token)
        message_text = "You have {0} instances configurations".format(integrations_counter)
        # identical message to both channels; loop instead of duplicated calls
        for channel in ("devops-events", "content-lab-tests"):
            sc.api_call(
                "chat.postMessage",
                channel=channel,
                username="******",
                as_user="******",
                attachments=attachments,
                text=message_text
            )
Ejemplo n.º 3
0
def update_with_tests_sections(missing_ids, modified_files, test_ids, tests):
    """Scan the tests sections of the modified files and collect known tests.

    Appends RUN_ALL_TESTS_FORMAT to test_ids (mutating the caller's list),
    adds each recognized test to `tests`, and removes the matched file ids
    from missing_ids. Unknown tests are reported and set the global _FAILED
    flag.

    Returns:
        set: missing_ids minus the ids whose tests were found.
    """
    global _FAILED

    test_ids.append(RUN_ALL_TESTS_FORMAT)
    # Search for tests section
    for file_path in modified_files:
        for test in get_tests(file_path):
            recognized = test in test_ids or re.match(NO_TESTS_FORMAT, test,
                                                      re.IGNORECASE)
            if not recognized:
                message = "The test '{0}' does not exist in the conf.json file, please re-check your code".format(
                    test)
                print_color(message, LOG_COLORS.RED)
                _FAILED = True
                continue

            is_integration_file = (
                re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE)
                or re.match(BETA_INTEGRATION_REGEX, file_path, re.IGNORECASE))
            _id = (get_script_or_integration_id(file_path)
                   if is_integration_file else get_name(file_path))

            missing_ids = missing_ids - {_id}
            tests.add(test)

    return missing_ids
Ejemplo n.º 4
0
def get_test_list(modified_files, modified_tests_list, all_tests, is_conf_json,
                  branch_name):
    """Create a test list that should run"""
    # start from the tests implied by modified content files, if any
    tests = find_tests_for_modified_files(modified_files) if modified_files else set()

    # directly-modified test playbooks run as well (set dedups automatically)
    for test_file in modified_tests_list:
        tests.add(collect_ids(test_file))

    if is_conf_json:
        tests = tests.union(get_test_from_conf(branch_name))

    if all_tests:
        tests.add("Run all tests")

    had_changes = modified_files or modified_tests_list or all_tests
    if not tests and had_changes:
        print_color(
            "There are no tests that check the changes you've done, please make sure you write one",
            LOG_COLORS.RED)
        sys.exit(1)

    return tests
Ejemplo n.º 5
0
def main():
    """Run tests either against AMI instances or a single build server."""
    options = options_handler()
    server = options.server
    is_ami = options.isAMI
    server_version = options.serverVersion

    if is_ami:  # Run tests in AMI configuration
        # each line is "<instance name>:<instance ip>"
        with open('./Tests/instance_ips.txt', 'r') as instance_file:
            instance_ips = [
                line.strip('\n').split(":")
                for line in instance_file.readlines()
            ]

        for ami_instance_name, ami_instance_ip in instance_ips:
            # TODO: remove the second condition once version 4.5 is out
            if (ami_instance_name != server_version
                    or ami_instance_name == "Demisto two before GA"):
                continue
            print_color("Starting tests for {}".format(ami_instance_name),
                        LOG_COLORS.GREEN)
            print("Starts tests with server url - https://{}".format(
                ami_instance_ip))
            server = SERVER_URL.format(ami_instance_ip)
            execute_testing(server, ami_instance_ip, server_version)
            sleep(8)

    else:  # Run tests in Server build configuration
        with open('public_ip', 'rb') as f:
            public_ip = f.read().strip()

        execute_testing(server, public_ip, server_version)
Ejemplo n.º 6
0
def is_correct_content_installed(ips, content_version, api_key):
    # type: (List[List], AnyStr, AnyStr) -> bool
    """ Checks if specific content version is installed on server list

    Args:
        ips: list with lists of [instance_name, instance_ip]
        content_version: content version that should be installed
        api_key: the demisto api key to create an api client with.

    Returns:
        True: if all tests passed, False if one failure
    """

    for ami_instance_name, ami_instance_ip in ips:
        host = "https://{}".format(ami_instance_ip)

        client = demisto_client.configure(base_url=host,
                                          api_key=api_key,
                                          verify_ssl=False)
        try:
            # resp_json stays None if the request below fails; the
            # isinstance check then turns that into a ValueError.
            resp_json = None
            try:
                resp = demisto_client.generic_request_func(
                    self=client,
                    path='/content/installed/',
                    method='POST',
                    accept='application/json',
                    content_type='application/json')
                # resp[0] is the response body as a python-literal string
                resp_json = ast.literal_eval(resp[0])
            except ApiException as err:
                # log and fall through; the type check below reports the failure
                print(err)
            if not isinstance(resp_json, dict):
                raise ValueError(
                    'Response from server is not a Dict, got [{}].\n'
                    'Text: {}'.format(type(resp_json), resp_json))
            release = resp_json.get("release")
            notes = resp_json.get("releaseNotes")
            installed = resp_json.get("installed")
            # all three fields must exist and the release string must contain
            # the expected content version
            if not (release and content_version in release and notes
                    and installed):
                print_error(
                    "Failed install content on instance [{}]\nfound content version [{}], expected [{}]"
                    "".format(ami_instance_name, release, content_version))
                return False
            else:
                print_color(
                    "Instance [{instance_name}] content verified with version [{content_version}]"
                    .format(instance_name=ami_instance_name,
                            content_version=release), LOG_COLORS.GREEN)
        except ValueError as exception:
            err_msg = "Failed to verify content version on server [{}]\n" \
                      "Error: [{}]\n".format(ami_instance_name, str(exception))
            if resp_json is not None:
                err_msg += "Server response: {}".format(resp_json)
            print_error(err_msg)
            # fail fast on the first instance that does not verify
            return False
    print_color(
        "Content was installed successfully on all of the instances! :)",
        LOG_COLORS.GREEN)
    return True
Ejemplo n.º 7
0
def get_test_list(files_string, branch_name):
    """Create a test list that should run.

    Args:
        files_string (str): the raw changed-files listing fed to
            get_modified_files.
        branch_name (str): current branch, used for conf.json test lookup.

    Returns:
        set: names of tests to run. Exits with code 1 when changes exist but
            no test covers them.
    """
    modified_files, modified_tests_list, all_tests, is_conf_json, infra_tests = get_modified_files(
        files_string)

    tests = set()
    if modified_files:
        tests = find_tests_for_modified_files(modified_files)

    # directly-modified test playbooks run too (set dedups automatically)
    for file_path in modified_tests_list:
        tests.add(collect_ids(file_path))

    if is_conf_json:
        tests = tests.union(get_test_from_conf(branch_name))

    if all_tests:
        tests.add("Run all tests")

    if infra_tests:  # Choosing 3 random tests for infrastructure testing
        test_ids = get_test_ids(check_nightly_status=True)
        for _ in range(3):
            # BUGFIX: random.randint is inclusive on both ends, so the old
            # test_ids[random.randint(0, len(test_ids))] could raise
            # IndexError. random.choice picks a valid index.
            tests.add(random.choice(test_ids))

    if not tests and (modified_files or modified_tests_list or all_tests):
        print_color(
            "There are no tests that check the changes you've done, please make sure you write one",
            LOG_COLORS.RED)
        sys.exit(1)

    return tests
Ejemplo n.º 8
0
def print_test_summary(succeed_playbooks,
                       failed_playbooks,
                       skipped_tests,
                       skipped_integration,
                       unmocklable_integrations,
                       proxy,
                       is_ami=True):
    """Print a colored summary of the test run: pass/fail counts, skipped
    tests/integrations, and mock-recording statistics (AMI runs only)."""
    succeed_count = len(succeed_playbooks)
    failed_count = len(failed_playbooks)
    skipped_count = len(skipped_tests)
    # mock bookkeeping only exists when running on AMI instances
    rerecorded_count = len(proxy.rerecorded_tests) if is_ami else 0
    empty_mocks_count = len(proxy.empty_files) if is_ami else 0
    unmocklable_count = len(unmocklable_integrations)

    print('\nTEST RESULTS:')
    print(f'\t Number of playbooks tested - {succeed_count + failed_count}')
    print_color(f'\t Number of succeeded tests - {succeed_count}',
                LOG_COLORS.GREEN)

    if failed_count > 0:
        print_error(f'\t Number of failed tests - {failed_count}:')
        for playbook_id in failed_playbooks:
            print_error('\t - ' + playbook_id)

    if rerecorded_count > 0:
        print_warning('\t Tests with failed playback and successful re-recording - '
                      f'{rerecorded_count}:')
        for playbook_id in proxy.rerecorded_tests:
            print_warning('\t - ' + playbook_id)

    if empty_mocks_count > 0:
        print(f'\t Successful tests with empty mock files - {empty_mocks_count}:')
        print('\t (either there were no http requests or no traffic is passed through the proxy.\n'
              '\t Investigate the playbook and the integrations.\n'
              '\t If the integration has no http traffic, add to unmockable_integrations in conf.json)')
        for playbook_id in proxy.empty_files:
            print('\t - ' + playbook_id)

    if skipped_integration:
        print_warning(f'\t Number of skipped integration - {len(skipped_integration)}:')
        for playbook_id in skipped_integration:
            print_warning('\t - ' + playbook_id)

    if skipped_count > 0:
        print_warning(f'\t Number of skipped tests - {skipped_count}:')
        for playbook_id in skipped_tests:
            print_warning('\t - ' + playbook_id)

    if unmocklable_count > 0:
        print_warning(f'\t Number of unmockable integrations - {unmocklable_count}:')
        for playbook_id, reason in unmocklable_integrations.items():
            print_warning('\t - ' + playbook_id + ' - ' + reason)
Ejemplo n.º 9
0
    def validate_all_files(self):
        """Validate all files in the repo are in the right format.

        Walks each directory derived from CHECKED_TYPES_REGEXES, runs the
        structure (scheme) validation on every non-hidden file, and on every
        package yml. Sets self._is_valid to False on any failure.
        """
        for regex in CHECKED_TYPES_REGEXES:
            # the regex prefix up to '.*' is the directory to walk
            splitted_regex = regex.split('.*')
            directory = splitted_regex[0]
            for root, dirs, files in os.walk(directory):
                if root not in DIR_LIST:  # Skipping in case we entered a package
                    continue
                print_color('Validating {} directory:'.format(directory), LOG_COLORS.GREEN)
                for file_name in files:
                    file_path = os.path.join(root, file_name)
                    # skipping hidden files
                    if file_name.startswith('.'):
                        continue

                    print('Validating ' + file_name)
                    structure_validator = StructureValidator(file_path)
                    if not structure_validator.is_valid_scheme():
                        self._is_valid = False

                if root in PACKAGE_SUPPORTING_DIRECTORIES:
                    for inner_dir in dirs:
                        # BUGFIX: guard against package dirs with no yml file —
                        # indexing [0] on an empty glob raised IndexError.
                        yml_files = glob.glob(os.path.join(root, inner_dir, '*.yml'))
                        if not yml_files:
                            continue
                        file_path = yml_files[0]
                        print('Validating ' + file_path)
                        structure_validator = StructureValidator(file_path)
                        if not structure_validator.is_valid_scheme():
                            self._is_valid = False
Ejemplo n.º 10
0
def init_storage_client(service_account=None):
    """Initialize google cloud storage client.

    In case of local dev usage the client will be initialized with user default credentials.
    Otherwise, client will be initialized from service account json that is stored in CircleCI.

    Args:
        service_account (str): full path to service account json.

    Return:
        storage.Client: initialized google cloud storage client.
    """
    if service_account:
        storage_client = storage.Client.from_service_account_json(
            service_account)
        print_color("Created gcp service account", LOG_COLORS.GREEN)

        return storage_client
    else:
        # in case of local dev use, ignore the warning about not using a
        # service account.
        warnings.filterwarnings(
            "ignore",
            message=google.auth._default._CLOUD_SDK_CREDENTIALS_WARNING)
        credentials, project = google.auth.default()
        storage_client = storage.Client(credentials=credentials,
                                        project=project)
        # BUGFIX: log-message typo — was "privare"
        print_color("Created gcp private account", LOG_COLORS.GREEN)

        return storage_client
Ejemplo n.º 11
0
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non critical loggings
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — "\*" in a regular string is an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    options = parser.parse_args()
    is_circle = options.circle
    is_backward_check = options.backwardComp

    # silence pykwalify's validation logging (see docstring)
    logging.basicConfig(level=logging.CRITICAL)

    print_color("Starting validating files structure", LOG_COLORS.GREEN)
    files_validator = FilesValidator(is_circle)
    if not files_validator.is_valid_structure(branch_name, is_backward_check=is_backward_check):
        sys.exit(1)

    print_color("Finished validating files structure", LOG_COLORS.GREEN)
    sys.exit(0)
Ejemplo n.º 12
0
    def validate_against_previous_version(self,
                                          branch_sha,
                                          prev_branch_sha=None,
                                          no_error=False):
        """Validate all files that were changed between previous version and branch_sha

        Args:
            branch_sha (str): Current branch SHA1 to validate
            prev_branch_sha (str): Previous branch SHA1 to validate against
            no_error (bool): If set to true will restore self._is_valid after run (will not return new errors)
        """
        if not prev_branch_sha:
            # fall back to the SHA1 pinned in the CircleCI build environment
            with open('./.circleci/config.yml') as f:
                config = yaml.safe_load(f)
                prev_branch_sha = config['jobs']['build']['environment'][
                    'GIT_SHA1']

        print_color('Starting validation against {}'.format(prev_branch_sha),
                    LOG_COLORS.GREEN)
        modified_files, _, _ = self.get_modified_and_added_files(
            branch_sha, self.is_circle, prev_branch_sha)

        saved_validity = self._is_valid
        self.validate_modified_files(modified_files,
                                     is_backward_check=True,
                                     old_branch=prev_branch_sha)
        if no_error:
            # advisory run: discard any new failures found above
            self._is_valid = saved_validity
Ejemplo n.º 13
0
    def validate_all_files(self):
        """Validate all files in the repo are in the right format.

        Walks each directory derived from CHECKED_TYPES_REGEXES, runs the
        structure (scheme) validation on every non-hidden file, and on every
        package yml. Sets self._is_valid to False on any failure.
        """
        for regex in CHECKED_TYPES_REGEXES:
            # the regex prefix up to ".*" is the directory to walk
            splitted_regex = regex.split(".*")
            directory = splitted_regex[0]
            for root, dirs, files in os.walk(directory):
                if root not in DIR_LIST:  # Skipping in case we entered a package
                    continue
                print_color("Validating {} directory:".format(directory), LOG_COLORS.GREEN)
                for file_name in files:
                    file_path = os.path.join(root, file_name)
                    # skipping hidden files
                    if file_name.startswith('.'):
                        continue

                    print("Validating " + file_name)
                    structure_validator = StructureValidator(file_path)
                    if not structure_validator.is_valid_scheme():
                        self._is_valid = False

                if root in PACKAGE_SUPPORTING_DIRECTORIES:
                    for inner_dir in dirs:
                        # BUGFIX: guard against package dirs with no yml file —
                        # indexing [0] on an empty glob raised IndexError.
                        yml_files = glob.glob(os.path.join(root, inner_dir, '*.yml'))
                        if not yml_files:
                            continue
                        file_path = yml_files[0]
                        print("Validating " + file_path)
                        structure_validator = StructureValidator(file_path)
                        if not structure_validator.is_valid_scheme():
                            self._is_valid = False
Ejemplo n.º 14
0
def slack_notifier(slack_token, secret_conf_path, server, user, password,
                   build_url):
    """Post instance-configuration summaries to Slack when building master.

    Args:
        slack_token (str): Slack API token used to create the client.
        secret_conf_path (str): path to the secrets configuration file.
        server (str): server URL the attachments are built against.
        user (str): username used by get_attachments.
        password (str): password used by get_attachments.
        build_url (str): CI build URL embedded in the attachments.

    Only runs on the `master` branch; otherwise it is a no-op.
    """
    branches = run_command("git branch")
    # BUGFIX: raw string — "\*" in a regular string is an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about instances",
                    LOG_COLORS.GREEN)
        attachments, integrations_counter = get_attachments(
            secret_conf_path, server, user, password, build_url)

        sc = SlackClient(slack_token)
        message_text = "You have {0} instances configurations".format(
            integrations_counter)
        # identical message to both channels; loop instead of duplicated calls
        for channel in ("devops-events", "content-lab-tests"):
            sc.api_call("chat.postMessage",
                        channel=channel,
                        username="******",
                        as_user="******",
                        attachments=attachments,
                        text=message_text)
Ejemplo n.º 15
0
def find_duplicates(id_set):
    """Find duplicated ids in every section of the id_set.

    Args:
        id_set (dict): maps section names (e.g. 'integrations', 'scripts') to
            lists of single-key dicts whose key is the item id.

    Returns:
        list: one list of duplicated ids per checked object type, with the
            combined Incident/Indicator Fields duplicates appended last.
    """
    lists_to_return = []

    objects_to_check = [
        'integrations', 'scripts', 'playbooks', 'TestPlaybooks', 'Classifiers',
        'Dashboards', 'Layouts', 'Reports', 'Widgets'
    ]
    for object_type in objects_to_check:
        print_color("Checking diff for {}".format(object_type),
                    LOG_COLORS.GREEN)
        objects = id_set.get(object_type)
        # each entry is a single-key dict; its key is the item id
        ids = set(list(specific_item.keys())[0] for specific_item in objects)

        dup_list = []
        for id_to_check in ids:
            if has_duplicate(objects, id_to_check, object_type):
                dup_list.append(id_to_check)
        lists_to_return.append(dup_list)

    # BUGFIX: message typo — was "Idicator"
    print_color("Checking diff for Incident and Indicator Fields",
                LOG_COLORS.GREEN)

    # incident and indicator fields share one namespace, so check them together
    fields = id_set['IncidentFields'] + id_set['IndicatorFields']
    field_ids = set(list(field.keys())[0] for field in fields)

    field_list = []
    for field_to_check in field_ids:
        if has_duplicate(fields, field_to_check,
                         'Indicator and Incident Fields'):
            field_list.append(field_to_check)
    lists_to_return.append(field_list)

    return lists_to_return
Ejemplo n.º 16
0
def print_test_summary(tests_data_keeper, is_ami=True):
    """Print a colored summary of the run stored in tests_data_keeper:
    pass/fail counts, skipped tests/integrations, and mock statistics
    (the latter only for AMI runs)."""
    succeed_playbooks = tests_data_keeper.succeeded_playbooks
    failed_playbooks = tests_data_keeper.failed_playbooks
    skipped_tests = tests_data_keeper.skipped_tests
    unmocklable_integrations = tests_data_keeper.unmockable_integrations
    skipped_integration = tests_data_keeper.skipped_integrations
    rerecorded_tests = tests_data_keeper.rerecorded_tests
    empty_files = tests_data_keeper.empty_files

    # mock bookkeeping only exists when running on AMI instances
    rerecorded_count = len(rerecorded_tests) if is_ami else 0
    empty_mocks_count = len(empty_files) if is_ami else 0

    print('\nTEST RESULTS:')
    print(f'\t Number of playbooks tested - {len(succeed_playbooks) + len(failed_playbooks)}')
    print_color(f'\t Number of succeeded tests - {len(succeed_playbooks)}', LOG_COLORS.GREEN)

    if failed_playbooks:
        print_error(f'\t Number of failed tests - {len(failed_playbooks)}:')
        for playbook_id in failed_playbooks:
            print_error('\t - ' + playbook_id)

    if rerecorded_count > 0:
        print_warning(f'\t Tests with failed playback and successful re-recording - {rerecorded_count}:')
        for playbook_id in rerecorded_tests:
            print_warning('\t - ' + playbook_id)

    if empty_mocks_count > 0:
        print(f'\t Successful tests with empty mock files - {empty_mocks_count}:')
        print('\t (either there were no http requests or no traffic is passed through the proxy.\n'
              '\t Investigate the playbook and the integrations.\n'
              '\t If the integration has no http traffic, add to unmockable_integrations in conf.json)')
        for playbook_id in empty_files:
            print('\t - ' + playbook_id)

    if skipped_integration:
        print_warning(f'\t Number of skipped integration - {len(skipped_integration)}:')
        for playbook_id in skipped_integration:
            print_warning('\t - ' + playbook_id)

    if skipped_tests:
        print_warning(f'\t Number of skipped tests - {len(skipped_tests)}:')
        for playbook_id in skipped_tests:
            print_warning('\t - ' + playbook_id)

    if unmocklable_integrations:
        print_warning(f'\t Number of unmockable integrations - {len(unmocklable_integrations)}:')
        for playbook_id, reason in unmocklable_integrations.items():
            print_warning('\t - ' + playbook_id + ' - ' + reason)
Ejemplo n.º 17
0
def run_test_logic(c,
                   failed_playbooks,
                   integrations,
                   playbook_id,
                   succeed_playbooks,
                   test_message,
                   test_options,
                   slack,
                   circle_ci,
                   build_number,
                   server_url,
                   build_name,
                   is_mock_run=False):
    """Run a single test playbook and record the outcome.

    Appends playbook_id to succeed_playbooks or failed_playbooks (mutating the
    caller's lists) and notifies on failure.

    Returns:
        bool: True when the playbook completed or was skipped for an
            unsupported version, False otherwise.
    """
    status, inc_id = test_integration(c, integrations, playbook_id,
                                      test_options, is_mock_run)
    succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)

    if status == PB_Status.COMPLETED:
        print_color('PASS: {} succeed'.format(test_message), LOG_COLORS.GREEN)
        succeed_playbooks.append(playbook_id)
    elif status == PB_Status.NOT_SUPPORTED_VERSION:
        print('PASS: {} skipped - not supported version'.format(test_message))
        succeed_playbooks.append(playbook_id)
    else:
        print_error('Failed: {} failed'.format(test_message))
        # annotate mock-disabled runs so the failure report distinguishes them
        playbook_id_with_mock = (playbook_id if is_mock_run
                                 else playbook_id + " (Mock Disabled)")
        failed_playbooks.append(playbook_id_with_mock)
        notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id,
                           server_url, build_name)

    return succeed
Ejemplo n.º 18
0
def slack_notifier(build_url, slack_token, env_results_file_name, container):
    """Post nightly-build status to the content team Slack channel.

    Only runs on the `master` branch. `container` selects the report type:
    non-zero means unit tests, zero means test playbooks.
    """
    branches = run_command("git branch")
    current_branch_match = re.search(r'\* (.*)', branches)
    branch_name = current_branch_match.group(1)

    if branch_name != 'master':
        return

    print("Extracting build status")
    if int(container):
        # container 1: unit tests
        print_color("Starting Slack notifications about nightly build - unit tests", LOG_COLORS.GREEN)
        content_team_attachments = get_attachments_for_unit_test(build_url)
    else:
        # container 0: test playbooks
        print_color("Starting Slack notifications about nightly build - tests playbook", LOG_COLORS.GREEN)
        content_team_attachments, _ = get_attachments_for_test_playbooks(build_url, env_results_file_name)

    print("Sending Slack messages to #content-team")
    slack_client = SlackClient(slack_token)
    slack_client.api_call(
        "chat.postMessage",
        channel="dmst-content-team",
        username="******",
        as_user="******",
        attachments=content_team_attachments
    )
Ejemplo n.º 19
0
def main():
    """Run tests against AMI instances or a single build server.

    For AMI runs, the numeric server version is parsed from
    ./Tests/images_data.txt and each matching instance from
    ./Tests/instance_ips.txt is tested in turn. For non-AMI runs the latest
    version is assumed and the first instance ip is used.
    """
    options = options_handler()
    server = options.server
    is_ami = options.isAMI
    server_version = options.serverVersion
    server_numeric_version = '0.0.0'

    if is_ami:  # Run tests in AMI configuration
        with open('./Tests/images_data.txt', 'r') as image_data_file:
            image_data = [
                line for line in image_data_file
                if line.startswith(server_version)
            ]
            if len(image_data) != 1:
                print('Did not get one image data for server version, got {}'.
                      format(image_data))
            else:
                # BUGFIX: raw string — '\w'/'\d' in a regular string are
                # invalid escape sequences (DeprecationWarning).
                server_numeric_version = re.findall(
                    r'Demisto-Circle-CI-Content-[\w-]+-([\d.]+)-[\d]{5}',
                    image_data[0])
                if server_numeric_version:
                    server_numeric_version = server_numeric_version[0]
                else:
                    server_numeric_version = '99.99.98'  # latest
                print('Server image info: {}'.format(image_data[0]))
                print('Server version: {}'.format(server_numeric_version))

        # each line is "<instance name>:<instance ip>"
        with open('./Tests/instance_ips.txt', 'r') as instance_file:
            instance_ips = instance_file.readlines()
            instance_ips = [
                line.strip('\n').split(":") for line in instance_ips
            ]

        for ami_instance_name, ami_instance_ip in instance_ips:
            if ami_instance_name == server_version:
                print_color("Starting tests for {}".format(ami_instance_name),
                            LOG_COLORS.GREEN)
                print("Starts tests with server url - https://{}".format(
                    ami_instance_ip))
                server = SERVER_URL.format(ami_instance_ip)
                execute_testing(server, ami_instance_ip, server_version,
                                server_numeric_version)
                sleep(8)

    else:  # Run tests in Server build configuration
        server_numeric_version = '99.99.98'  # assume latest
        print("Using server version: {} (assuming latest for non-ami)".format(
            server_numeric_version))
        with open('./Tests/instance_ips.txt', 'r') as instance_file:
            instance_ips = instance_file.readlines()
            instance_ip = [
                line.strip('\n').split(":")[1] for line in instance_ips
            ][0]

        execute_testing(SERVER_URL.format(instance_ip),
                        instance_ip,
                        server_version,
                        server_numeric_version,
                        is_ami=False)
Ejemplo n.º 20
0
def main():
    """Send nightly Slack notifications, or report why they were skipped."""
    options = options_handler()
    if not options.nightly:
        print_color(
            "Not nightly build, stopping Slack Notifications about Content build",
            LOG_COLORS.RED)
        return
    slack_notifier(options.url, options.slack,
                   options.env_results_file_name)
Ejemplo n.º 21
0
def handle_run_res(res: Tuple[subprocess.CompletedProcess, str], fail_pkgs: list, good_pkgs: list):
    """Record a package run result and echo its captured output.

    Appends the package name to fail_pkgs or good_pkgs (mutating the caller's
    lists) based on the process return code, printing a colored banner for
    failures and a plain one for successes.
    """
    completed, pkg_name = res
    banner = "============= {} =============".format(pkg_name)
    if completed.returncode:
        fail_pkgs.append(pkg_name)
        print_color(banner, LOG_COLORS.RED)
    else:
        good_pkgs.append(pkg_name)
        print(banner)
    print(completed.stdout)
    print(completed.stderr)
def main():
    """Run pkg_dev_test_tasks.py over all changed content packages in parallel.

    Environment variables:
        DIFF_COMPARE: how to do a git compare. Leave empty to run on all.
        MAX_WORKERS: thread pool size (default: 10).

    Exits with status 1 when help is requested or any package fails.
    """
    if len(sys.argv) == 2 and (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
        print(
            "Run pkg_dev_test_tasks.py in parallel. Accepts same parameters as pkg_dev_test_tasks.py.\n"
            "Additionally you can specify the following environment variables:\n"
            "DIFF_COMPARE: specify how to do a git compare. Leave empty to run on all.\n"
            "MAX_WORKERS: max amount of workers to use for running pkg_dev_test_tasks.py")
        sys.exit(1)
    max_workers = int(os.getenv("MAX_WORKERS", "10"))
    find_out = subprocess.check_output([
        "find", "Integrations", "Scripts", "Beta_Integrations", "-maxdepth",
        "1", "-mindepth", "1", "-type", "d", "-print"
    ],
                                       text=True)
    # avoid shadowing the builtin `dir` (the original looped `for dir in ...`)
    pkgs_to_run = [
        pkg_dir for pkg_dir in find_out.splitlines() if should_run_pkg(pkg_dir)
    ]
    print(
        "Starting parallel run for [{}] packages with [{}] max workers".format(
            len(pkgs_to_run), max_workers))
    params = sys.argv[1:]
    fail_pkgs = []
    good_pkgs = []
    if len(pkgs_to_run) > 1:  # set up pipenv beforehand to avoid conflicts
        get_dev_requirements(2.7)
        get_dev_requirements(3.7)
    # run CommonServer non parallel to avoid conflicts
    # when we modify the file for mypy includes
    if 'Scripts/CommonServerPython' in pkgs_to_run:
        pkgs_to_run.remove('Scripts/CommonServerPython')
        res = run_dev_task('Scripts/CommonServerPython', params)
        handle_run_res(res, fail_pkgs, good_pkgs)
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers) as executor:
        futures_submit = [
            executor.submit(run_dev_task, pkg_dir, params)
            for pkg_dir in pkgs_to_run
        ]
        for future in concurrent.futures.as_completed(futures_submit):
            handle_run_res(future.result(), fail_pkgs, good_pkgs)
    if fail_pkgs:
        create_failed_unittests_file(fail_pkgs)
        print_color("\n******* FAIL PKGS: *******", LOG_COLORS.RED)
        print_color("\n\t{}\n".format("\n\t".join(fail_pkgs)), LOG_COLORS.RED)
    if good_pkgs:
        print_color("\n******* SUCCESS PKGS: *******", LOG_COLORS.GREEN)
        print_color("\n\t{}\n".format("\n\t".join(good_pkgs)),
                    LOG_COLORS.GREEN)
    if not good_pkgs and not fail_pkgs:
        print_color("\n******* No changed packages found *******\n",
                    LOG_COLORS.YELLOW)
    if fail_pkgs:
        sys.exit(1)
Ejemplo n.º 23
0
    def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket,
                          override_pack):
        """Upload the pack zip artifact to its versioned path in cloud storage.

        Target path: /content/packs/pack_name/pack_latest_version. When files
        already exist there and override_pack is False, the upload is skipped.

        Args:
            zip_pack_path (str): full path to pack zip artifact.
            latest_version (str): pack latest version.
            storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
            override_pack (bool): whether to override existing pack.

        Returns:
            bool: whether the operation succeeded.
            bool: True when the pack already existed and the upload was skipped,
                otherwise False.
        """
        try:
            version_pack_path = os.path.join(STORAGE_BASE_PATH,
                                             self._pack_name, latest_version)
            existing_files = [
                existing_blob.name
                for existing_blob in storage_bucket.list_blobs(prefix=version_pack_path)
            ]

            if existing_files and not override_pack:
                print_warning(
                    f"The following packs already exist at storage: {', '.join(existing_files)}"
                )
                print_warning(
                    f"Skipping step of uploading {self._pack_name}.zip to storage."
                )
                return True, True

            pack_full_path = f"{version_pack_path}/{self._pack_name}.zip"
            destination_blob = storage_bucket.blob(pack_full_path)

            with open(zip_pack_path, "rb") as pack_zip:
                destination_blob.upload_from_file(pack_zip)

            self.relative_storage_path = destination_blob.name
            print_color(
                f"Uploaded {self._pack_name} pack to {pack_full_path} path.",
                LOG_COLORS.GREEN)

            return True, False
        except Exception as e:
            print_error(
                f"Failed in uploading {self._pack_name} pack to gcs.\nAdditional info: {e}"
            )
            return False, True
Ejemplo n.º 24
0
def main():
    """Scan the current branch for secrets unless it is an external (forked) PR."""
    options = parse_script_arguments()
    is_circle = options.circle
    branch_name = get_branch_name()
    # forked PRs are skipped entirely - only in-repo branches are scanned
    if re.match(EXTERNAL_PR_REGEX, branch_name) is None:
        if get_secrets(branch_name, is_circle):
            sys.exit(1)
        print_color('Finished validating secrets, no secrets were found.', LOG_COLORS.GREEN)
    sys.exit(0)
Ejemplo n.º 25
0
def is_correct_content_installed(username, password, ips, content_version):
    # type: (AnyStr, AnyStr, List[List], AnyStr) -> bool
    """Verify that every server in the list reports the expected content version.

    Args:
        username: for server connection
        password: for server connection
        ips: list with lists of [instance_name, instance_ip]
        content_version: content version that should be installed

    Returns:
        True when all instances verified, False on the first failure.
    """
    for ami_instance_name, ami_instance_ip in ips:
        client = demisto.DemistoClient(None, "https://{}".format(ami_instance_ip),
                                       username, password)
        client.Login()
        resp = client.req("post", "/content/installed/", None)
        resp_json = None
        try:
            resp_json = resp.json()
            if not isinstance(resp_json, dict):
                raise ValueError(
                    'Response from server is not a Dict, got [{}].\n'
                    'Text: {}'.format(type(resp_json), resp.text))
            release = resp_json.get("release")
            notes = resp_json.get("releaseNotes")
            installed = resp_json.get("installed")
            # short-circuits: `in` is only evaluated when release is truthy
            verified = bool(release) and content_version in release and bool(notes) and bool(installed)
            if not verified:
                print_error(
                    "Failed install content on instance [{}]\nfound content version [{}], expected [{}]"
                    "".format(ami_instance_name, release, content_version))
                return False
            print_color(
                "Instance [{instance_name}] content verified with version [{content_version}]"
                .format(instance_name=ami_instance_name,
                        content_version=release), LOG_COLORS.GREEN)
        except ValueError as exception:
            err_msg = "Failed to verify content version on server [{}]\n" \
                      "Error: [{}]\n".format(ami_instance_name, str(exception))
            if resp_json is not None:
                err_msg += "Server response: {}".format(resp_json)
            print_error(err_msg)
            return False
    print_color(
        "Content was installed successfully on all of the instances! :)",
        LOG_COLORS.GREEN)
    return True
Ejemplo n.º 26
0
def slack_notifier(slack_token, secret_conf_path, server, user, password, build_url):
    """Post the instance-configurations summary to the dmst-content-lab Slack channel."""
    print_color("Starting Slack notifications about instances", LOG_COLORS.GREEN)
    attachments, integrations_counter = get_attachments(secret_conf_path, server, user, password, build_url)

    summary_text = "You have {0} instances configurations".format(integrations_counter)
    slack_client = SlackClient(slack_token)
    slack_client.api_call(
        "chat.postMessage",
        channel="dmst-content-lab",
        username="******",
        as_user="******",
        attachments=attachments,
        text=summary_text,
    )
Ejemplo n.º 27
0
def download_and_extract_index(storage_bucket, extract_destination_path):
    """Fetch index.zip from cloud storage and unpack it locally.

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
        extract_destination_path (str): the full path of extract folder.

    Returns:
        str: extracted index folder full path.
        Blob: google cloud storage object that represents index.zip blob.

    """
    index_storage_path = os.path.join(STORAGE_BASE_PATH,
                                      f"{Pack.INDEX_NAME}.zip")
    download_index_path = os.path.join(extract_destination_path,
                                       f"{Pack.INDEX_NAME}.zip")
    index_blob = storage_bucket.blob(index_storage_path)
    index_folder_path = os.path.join(extract_destination_path, Pack.INDEX_NAME)

    # no index exists in the bucket yet - start from an empty local folder
    if not index_blob.exists():
        os.mkdir(index_folder_path)
        return index_folder_path, index_blob

    index_blob.cache_control = "no-cache"  # index zip should never be cached in the memory, should be updated version
    index_blob.reload()
    index_blob.download_to_filename(download_index_path)

    if not os.path.exists(download_index_path):
        print_error(
            f"Failed to download {Pack.INDEX_NAME}.zip file from cloud storage."
        )
        sys.exit(1)

    with ZipFile(download_index_path, 'r') as index_zip:
        index_zip.extractall(extract_destination_path)

    if not os.path.exists(index_folder_path):
        print_error(
            f"Failed creating {Pack.INDEX_NAME} folder with extracted data."
        )
        sys.exit(1)

    os.remove(download_index_path)
    print_color(
        f"Finished downloading and extracting {Pack.INDEX_NAME} file to {extract_destination_path}",
        LOG_COLORS.GREEN)

    return index_folder_path, index_blob
Ejemplo n.º 28
0
def mock_run(c, proxy, failed_playbooks, integrations, playbook_id,
             succeed_playbooks, test_message, test_options, slack, circle_ci,
             build_number, server_url, build_name, start_message):
    """Run a test playbook against recorded mock traffic, re-recording on failure.

    When a mock file exists for the playbook it is played back via the proxy;
    if playback fails, falls back to a live run that records a fresh mock file
    (run_and_record). Appends the playbook id to succeed_playbooks on success;
    failures are handled inside run_and_record.
    """
    # whether a failed playback should mark the mock file as re-recorded
    rerecord = False

    if proxy.has_mock_file(playbook_id):
        print('{} (Mock: Playback)'.format(start_message))
        proxy.start(playbook_id)
        # run test
        status, inc_id = test_integration(c,
                                          integrations,
                                          playbook_id,
                                          test_options,
                                          is_mock_run=True)
        # use results
        proxy.stop()
        if status == PB_Status.COMPLETED:
            print_color('PASS: {} succeed'.format(test_message),
                        LOG_COLORS.GREEN)
            succeed_playbooks.append(playbook_id)
            print('------ Test {} end ------\n'.format(test_message))

            return

        elif status == PB_Status.NOT_SUPPORTED_VERSION:
            # skipped tests still count as succeeded for reporting purposes
            print('PASS: {} skipped - not supported version'.format(
                test_message))
            succeed_playbooks.append(playbook_id)
            print('------ Test {} end ------\n'.format(test_message))

            return

        else:
            print(
                "Test failed with mock, recording new mock file. (Mock: Recording)"
            )
            rerecord = True
    else:
        print(start_message + ' (Mock: Recording)')

    # Mock recording - no mock file or playback failure.
    succeed = run_and_record(c, proxy, failed_playbooks, integrations,
                             playbook_id, succeed_playbooks, test_message,
                             test_options, slack, circle_ci, build_number,
                             server_url, build_name)

    # only a re-record that also succeeded is tracked for reporting
    if rerecord and succeed:
        proxy.rerecorded_tests.append(playbook_id)
    print('------ Test {} end ------\n'.format(test_message))
Ejemplo n.º 29
0
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non critical loggings
    """
    # current branch name is the first line marked with '*' in `git branch`
    branches = run_command('git branch')
    branch_name_reg = re.search(r'\* (.*)', branches)
    branch_name = branch_name_reg.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    parser.add_argument('-t', '--test-filter', type=str2bool, default=False, help='Check that tests are valid.')
    parser.add_argument('-p', '--prev-ver', help='Previous branch or SHA1 commit to run checks against.')
    options = parser.parse_args()
    is_circle = options.circle
    is_backward_check = options.backwardComp

    # silence pykwalify's validation logging (see module docstring above)
    logging.basicConfig(level=logging.CRITICAL)

    print_color('Starting validating files structure', LOG_COLORS.GREEN)
    files_validator = FilesValidator(is_circle, print_ignored_files=True)
    if not files_validator.is_valid_structure(branch_name, is_backward_check=is_backward_check,
                                              prev_ver=options.prev_ver):
        sys.exit(1)
    if options.test_filter:
        try:
            print_warning('Updating idset. Be patient if this is the first time...')
            subprocess.check_output(['./Tests/scripts/update_id_set.py'])
            print_warning('Checking that we have tests for all content...')
            try:
                tests_out = subprocess.check_output(['./Tests/scripts/configure_tests.py', '-s', 'true'],
                                                    stderr=subprocess.STDOUT)
                print(tests_out)
            except Exception:
                # the id_set may be stale - rebuild it and retry before failing
                print_warning('Recreating idset to be sure that configure tests failure is accurate.'
                              ' Be patient this can take 15-20 seconds ...')
                subprocess.check_output(['./Tests/scripts/update_id_set.py', '-r'])
                print_warning('Checking that we have tests for all content again...')
                subprocess.check_call(['./Tests/scripts/configure_tests.py', '-s', 'true'])
        except Exception as ex:
            print_error('Failed validating tests: {}'.format(ex))
            sys.exit(1)
    print_color('Finished validating files structure', LOG_COLORS.GREEN)
    sys.exit(0)
Ejemplo n.º 30
0
    def format_metadata(self, pack_content_items, integration_images):
        """Rewrite the pack's user metadata into the marketplace metadata format.

        Loads Pack.USER_METADATA from the pack folder, parses it via
        Pack._parse_pack_metadata and writes the result to Pack.METADATA
        (format defined in issue #19786).

        Args:
            pack_content_items (dict): content items located inside the pack, keyed by
                type (Classifiers, Dashboards, IncidentFields, IncidentTypes,
                IndicatorFields, Integrations, Layouts, Playbooks, Reports, Scripts,
                Widgets), each mapped to a list of items with name and description
                (several items have no description).
            integration_images (list): uploaded integration images with integration
                display name and image gcs public url.

        Returns:
            bool: True when the metadata file was parsed successfully, otherwise False.
        """
        # user metadata path before parsing
        user_metadata_path = os.path.join(self._pack_path, Pack.USER_METADATA)
        # deployed metadata path after parsing
        metadata_path = os.path.join(self._pack_path, Pack.METADATA)

        if not os.path.exists(user_metadata_path):
            print_error(
                f"{self._pack_name} pack is missing {Pack.USER_METADATA} file."
            )
            return False

        with open(user_metadata_path, "r") as user_metadata_file:
            user_metadata = json.load(user_metadata_file)

        formatted_metadata = Pack._parse_pack_metadata(
            user_metadata=user_metadata,
            pack_content_items=pack_content_items,
            pack_id=self._pack_name,
            integration_images=integration_images)

        # write back the parsed metadata
        with open(metadata_path, "w") as metadata_file:
            json.dump(formatted_metadata, metadata_file, indent=4)

        print_color(
            f"Finished formatting {self._pack_name} packs's {Pack.METADATA} {metadata_path} file.",
            LOG_COLORS.GREEN)
        return True
def update_content_on_demisto_instance(client, username, password, server):
    '''Try to update the content

    Args:
        client (demisto_client): The configured client to use.
        username (str): The username to pass to Tests/update_content_data.py
        password (str): The password to pass to Tests/update_content_data.py
        server (str): The server url to pass to Tests/update_content_data.py
    '''
    content_zip_path = 'artifacts/all_content.zip'
    # NOTE(review): credentials are passed on the command line and may be
    # visible in process listings / CI logs - confirm this is acceptable here
    cmd_str = 'python Tests/update_content_data.py -u {} -p {} -s {} --content_zip {}'.format(
        username, password, server, content_zip_path)
    run_command(cmd_str, is_silenced=False)

    # Check if content update has finished installing
    sleep_interval = 20
    updating_content = is_content_update_in_progress(client)
    while updating_content.lower() == 'true':
        sleep(sleep_interval)
        updating_content = is_content_update_in_progress(client)

    if updating_content.lower() == 'request unsuccessful':
        # since the request to check if content update installation finished didn't work, can't use that mechanism
        # to check and just try sleeping for 30 seconds instead to allow for content update installation to complete
        sleep(30)
    else:
        # check that the content installation updated
        # verify the asset id matches the circleci build number / asset_id in the content-descriptor.json
        release, asset_id = get_content_version_details(client)
        with open('content-descriptor.json', 'r') as cd_file:
            cd_json = json.loads(cd_file.read())
            cd_release = cd_json.get('release')
            cd_asset_id = cd_json.get('assetId')
        if release == cd_release and asset_id == cd_asset_id:
            print_color('Content Update Successfully Installed!',
                        color=LOG_COLORS.GREEN)
        else:
            err_details = 'Attempted to install content with release "{}" and assetId '.format(
                cd_release)
            err_details += '"{}" but release "{}" and assetId "{}" were '.format(
                cd_asset_id, release, asset_id)
            err_details += 'retrieved from the instance post installation.'
            print_error(
                'Content Update was Unsuccessful:\n{}'.format(err_details))
            sys.exit(1)
def verify_base_branch(pr_num):
    """Checks if the base branch of a pull request is master or not

    Args:
        pr_num (string): The string representation of the pr number

    Returns:
        tuple: (message, bool) - a message for the user, and False when the
        base branch is master (contributions must not merge directly to
        master), True otherwise.
    """

    print_color('Fetching the base branch of pull request #{}.'.format(pr_num),
                LOG_COLORS.NATIVE)
    base_branch = get_base_branch(pr_num)
    if base_branch == 'master':
        return 'Cannot merge a contribution directly to master, the pull request reviewer will handle that soon.', False
    else:
        return 'Verified pull request #{} base branch successfully.'.format(
            pr_num), True
Ejemplo n.º 33
0
def slack_notifier(build_url, slack_token):
    """Post nightly build status to Slack, but only when run from master.

    Args:
        build_url (str): CI build url used to extract the build status.
        slack_token (str): Slack API token used to post the message.
    """
    branches = run_command("git branch")
    # raw string so `\*` is a regex escape, not an (invalid) string escape
    branch_name_reg = re.search(r"\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about nightly build", LOG_COLORS.GREEN)
        print("Extracting build status")
        content_team_attachments, content_attachments = get_attachments(build_url)

        # NOTE(review): the message mentions #content as well, but only the
        # content-team channel is posted to below - confirm intent
        print("Sending Slack messages to #content and #content-team")
        sc = SlackClient(slack_token)
        sc.api_call(
            "chat.postMessage",
            channel="content-team",
            username="******",
            as_user="******",
            attachments=content_team_attachments
        )
Ejemplo n.º 34
0
def find_tests_for_modified_files(modified_files):
    """Collect the tests that cover the given modified files.

    Args:
        modified_files (list): file paths changed in the branch.

    Returns:
        set: the ids of tests that should run.

    Exits with status 1 when any modified entity is missing test coverage.
    """
    script_names = set()
    playbook_names = set()
    integration_ids = set()

    tests_set, catched_scripts, catched_playbooks = collect_changed_ids(integration_ids, playbook_names,
                                                                        script_names, modified_files)
    test_ids, missing_ids, caught_missing_test = collect_tests(script_names, playbook_names, integration_ids,
                                                               catched_scripts, catched_playbooks, tests_set)
    # files may declare their tests inline ("tests" section) - drop those from missing
    missing_ids = update_with_tests_sections(missing_ids, modified_files, test_ids, tests_set)

    if missing_ids:
        test_string = '\n'.join(missing_ids)
        message = "You've failed to provide tests for:\n{0}".format(test_string)
        print_color(message, LOG_COLORS.RED)

    if caught_missing_test or missing_ids:
        sys.exit(1)

    return tests_set
Ejemplo n.º 35
0
def update_with_tests_sections(missing_ids, modified_files, test_ids, tests):
    """Remove ids from missing_ids that are covered by a file's tests section.

    Every referenced test is accumulated into *tests*. Exits immediately when a
    file references a test id that is not known from conf.json.
    """
    test_ids.append(RUN_ALL_TESTS_FORMAT)
    # Search for tests section
    for file_path in modified_files:
        for test in get_tests(file_path):
            known_test = test in test_ids or re.match(NO_TESTS_FORMAT, test, re.IGNORECASE)
            if not known_test:
                message = "The test '{0}' does not exist in the conf.json file, please re-check your code".format(test)
                print_color(message, LOG_COLORS.RED)
                sys.exit(1)

            is_integration_file = (re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE)
                                   or re.match(BETA_INTEGRATION_REGEX, file_path, re.IGNORECASE))
            _id = get_script_or_integration_id(file_path) if is_integration_file else get_name(file_path)

            missing_ids = missing_ids - {_id}
            tests.add(test)

    return missing_ids
Ejemplo n.º 36
0
    branches = run_command("git branch")
    branch_name_reg = re.search("\* (.*)", branches)
    branch_name = branch_name_reg.group(1)

    if branch_name == 'master':
        print_color("Starting Slack notifications about nightly build", LOG_COLORS.GREEN)
        print("Extracting build status")
        content_team_attachments, content_attachments = get_attachments(build_url)

        print("Sending Slack messages to #content and #content-team")
        sc = SlackClient(slack_token)
        sc.api_call(
            "chat.postMessage",
            channel="content-team",
            username="******",
            as_user="******",
            attachments=content_team_attachments
        )


if __name__ == "__main__":
    options = options_handler()
    if options.nightly:
        slack_notifier(options.url, options.slack)
    else:
        print_color("Not nightly build, stopping Slack Notifications about Content build", LOG_COLORS.RED)

    # Clean up report files from the test run. os.remove raises
    # FileNotFoundError when a file is absent - assumes earlier build steps
    # always create all three files (TODO confirm)
    os.remove('./Tests/failed_tests.txt')
    os.remove('./Tests/skipped_tests.txt')
    os.remove('./Tests/skipped_integrations.txt')
Ejemplo n.º 37
0
def re_create_id_set():
    """Rebuild ./Tests/id_set.json from all content entities.

    Processes Integrations, Beta_Integrations, Playbooks, Scripts and
    TestPlaybooks with a worker pool, sorts each list and writes the result.
    """
    start_time = time.time()
    scripts_list = []
    playbooks_list = []
    integration_list = []
    testplaybooks_list = []

    print_color("Starting the creation of the id_set", LOG_COLORS.GREEN)
    # context manager ensures the pool is terminated (it was leaked before)
    with Pool(processes=cpu_count() * 2) as pool:
        print_color("Starting iterating over Integrations", LOG_COLORS.GREEN)
        integration_files = glob.glob(os.path.join('Integrations', '*'))
        integration_files.extend(glob.glob(os.path.join('Beta_Integrations', '*')))
        for arr in pool.map(process_integration, integration_files):
            integration_list.extend(arr)
        print_color("Starting iterating over Playbooks", LOG_COLORS.GREEN)
        for arr in pool.map(process_playbook, glob.glob(os.path.join('Playbooks', '*'))):
            playbooks_list.extend(arr)
        print_color("Starting iterating over Scripts", LOG_COLORS.GREEN)
        for arr in pool.map(process_script, glob.glob(os.path.join('Scripts', '*'))):
            scripts_list.extend(arr)
        print_color("Starting iterating over TestPlaybooks", LOG_COLORS.GREEN)
        for pair in pool.map(process_testplaybook_path, glob.glob(os.path.join('TestPlaybooks', '*'))):
            if pair[0]:
                testplaybooks_list.append(pair[0])
            if pair[1]:
                scripts_list.append(pair[1])

    new_ids_dict = OrderedDict()
    # we sort each time the whole set in case someone manually changed something
    # it shouldn't take too much time
    new_ids_dict['scripts'] = sort(scripts_list)
    new_ids_dict['playbooks'] = sort(playbooks_list)
    new_ids_dict['integrations'] = sort(integration_list)
    new_ids_dict['TestPlaybooks'] = sort(testplaybooks_list)

    with open('./Tests/id_set.json', 'w') as id_set_file:
        json.dump(new_ids_dict, id_set_file, indent=4)
    exec_time = time.time() - start_time
    print_color("Finished the creation of the id_set. Total time: {} seconds".format(exec_time), LOG_COLORS.GREEN)
Ejemplo n.º 38
0
            commit_string = run_command("git log -n 2 --pretty='%H'")
            commit_string = commit_string.replace("'", "")
            last_commit, second_last_commit = commit_string.split()
            files_string = run_command("git diff --name-status {}...{}".format(second_last_commit, last_commit))

        tests = get_test_list(files_string, branch_name)

        tests_string = '\n'.join(tests)
        if tests_string:
            print('Collected the following tests:\n{0}\n'.format(tests_string))
        else:
            print('No filter configured, running all tests')

    print("Creating filter_file.txt")
    with open("./Tests/filter_file.txt", "w") as filter_file:
        filter_file.write(tests_string)


if __name__ == "__main__":
    print_color("Starting creation of test filter file", LOG_COLORS.GREEN)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-n', '--nightly', type=str2bool, help='Is nightly or not')
    options = parser.parse_args()

    # Create test file based only on committed files
    create_test_file(options.nightly)

    print_color("Finished creation of the test filter file", LOG_COLORS.GREEN)
    # explicit success exit so CI treats this step as passed
    sys.exit(0)
Ejemplo n.º 39
0
            username="******",
            as_user="******",
            attachments=attachments,
            text="You have {0} instances configurations".format(integrations_counter)
        )

        sc.api_call(
            "chat.postMessage",
            channel="content-lab-tests",
            username="******",
            as_user="******",
            attachments=attachments,
            text="You have {0} instances configurations".format(integrations_counter)
        )


if __name__ == "__main__":
    options = options_handler()
    if options.nightly:
        with open('./Tests/instance_ips.txt', 'r') as instance_file:
            instance_ips = instance_file.readlines()
            instance_ips = [line.strip('\n').split(":") for line in instance_ips]

        # `server` was previously left unbound (NameError) when no
        # "Demisto GA" instance was listed - fail fast with a clear error.
        # The last matching instance wins, matching the original behavior.
        server = None
        for ami_instance_name, ami_instance_ip in instance_ips:
            if ami_instance_name == "Demisto GA":
                server = SERVER_URL.format(ami_instance_ip)
        if server is None:
            raise RuntimeError('No "Demisto GA" instance found in ./Tests/instance_ips.txt')

        slack_notifier(options.slack, options.secret, server, options.user, options.password, options.buildUrl)
    else:
        print_color("Not nightly build, stopping Slack Notifications about instances", LOG_COLORS.RED)