Example #1
def get_release_notes_draft(github_token, asset_id):
    """
    if possible, download current release draft from content repository in github.

    :param github_token: github token with push permission (in order to get the draft).
    :param asset_id: content build's asset id.
    :return: draft text (or empty string on error).
    """
    # Disable insecure warnings
    requests.packages.urllib3.disable_warnings()

    res = requests.get(
        'https://api.github.com/repos/demisto/content/releases',
        verify=False,
        headers={'Authorization': 'token {}'.format(github_token)})

    if res.status_code != 200:
        print_warning('unable to get release draft ({}), reason:\n{}'.format(
            res.status_code, res.text))
        return ''

    drafts = [release for release in res.json() if release.get('draft', False)]
    if drafts:
        if len(drafts) == 1:
            return drafts[0]['body'].replace("xxxxx", asset_id)
        else:
            print_warning(
                'Too many drafts to choose from ({}), skipping update.'.format(
                    len(drafts)))

    return ''
Example #2
def get_base_branch(pr_num):
    """Fetches the base branch name of PR num {pr_num}

    Args:
        pr_num (string): The string representation of the pr number

    Returns:
        string. The name of the base branch of the pr if succeeds, '' otherwise.
    """

    # Disable insecure warnings
    requests.packages.urllib3.disable_warnings()  # pylint: disable=no-member

    url = 'https://api.github.com/repos/demisto/content/pulls/{}'.format(
        pr_num)

    try:
        res = requests.get(url, verify=False)
        res.raise_for_status()
        pr = res.json()
        if pr and isinstance(pr, list) and len(pr) == 1:
            # GitHub may return a list containing the PR; otherwise pr is already a dict
            pr = pr[0]
        return pr.get('base', {}).get('ref', '')

    except (requests.exceptions.HTTPError, ValueError) as e:
        # If fetching the PR failed (HTTP error) or parsing res.json() raised an error,
        # warn but don't fail the build
        print_warning('Unable to fetch pull request #{0}.\nError: {1}'.format(
            pr_num, str(e)))
        return ''
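
The base branch lookup above boils down to reading the nested 'base'/'ref' keys from the PR payload. A minimal, self-contained sketch of that access pattern, using a made-up dict in place of the GitHub API response:

pr = {'number': 1, 'base': {'ref': 'master', 'sha': 'abc123'}}
# The empty-dict default keeps the lookup from raising when 'base' is missing.
print(pr.get('base', {}).get('ref', ''))  # -> 'master'
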
Example #3
    def __init__(self,
                 file_path,
                 check_git=True,
                 old_file_path=None,
                 old_git_branch='master'):
        self._is_valid = True

        self.file_path = file_path
        if check_git:
            self.current_integration = get_yaml(file_path)
            # The replace at the end is for Windows path support
            if old_file_path:
                git_hub_path = os.path.join(CONTENT_GITHUB_LINK,
                                            old_git_branch,
                                            old_file_path).replace("\\", "/")
                file_content = requests.get(git_hub_path, verify=False).content
                self.old_integration = yaml.safe_load(file_content)
            else:
                try:
                    file_path_from_old_branch = os.path.join(
                        CONTENT_GITHUB_LINK, old_git_branch,
                        file_path).replace("\\", "/")
                    res = requests.get(file_path_from_old_branch, verify=False)
                    res.raise_for_status()
                    self.old_integration = yaml.safe_load(res.content)
                except Exception as e:
                    print_warning(
                        "{}\nCould not find the old integration please make sure that you did not break "
                        "backward compatibility".format(str(e)))
                    self.old_integration = None
Example #4
    def validate_file_release_notes(self):
        """Validate that the file has proper release notes when modified.

        This function updates the class attribute self._is_valid instead of passing it back and forth.
        """
        if self.is_renamed:
            print_warning("You might need RN please make sure to check that.")
            return

        data_dictionary = None
        if os.path.isfile(self.file_path):
            with open(os.path.expanduser(self.file_path), "r") as f:
                if self.file_path.endswith(".json"):
                    data_dictionary = json.load(f)
                elif self.file_path.endswith(
                        ".yaml") or self.file_path.endswith('.yml'):
                    try:
                        data_dictionary = yaml.safe_load(f)
                    except Exception as e:
                        print_error(self.file_path +
                                    " has yml structure issue. Error was: " +
                                    str(e))
                        self._is_valid = False

            if data_dictionary and data_dictionary.get('releaseNotes') is None:
                print_error("File " + self.file_path +
                            " is missing releaseNotes, please add.")
                self._is_valid = False
Example #5
def test_instances(secret_conf_path, server, username, password):
    integrations = get_integrations(secret_conf_path)

    instance_ids = []
    failed_integrations = []
    integrations_counter = 0

    prints_manager = ParallelPrintsManager(1)

    content_installation_client = demisto_client.configure(base_url=server,
                                                           username=username,
                                                           password=password,
                                                           verify_ssl=False)
    install_new_content(content_installation_client, server)
    for integration in integrations:
        c = demisto_client.configure(base_url=server,
                                     username=username,
                                     password=password,
                                     verify_ssl=False)
        integrations_counter += 1
        integration_name = integration.get('name')
        integration_instance_name = integration.get('instance_name', '')
        integration_params = integration.get('params')
        devops_comments = integration.get('devops_comments')
        product_description = integration.get('product_description', '')
        is_byoi = integration.get('byoi', True)
        has_integration = integration.get('has_integration', True)
        validate_test = integration.get('validate_test', True)

        if has_integration:
            instance_id, failure_message, _ = __create_integration_instance(
                c,
                integration_name,
                integration_instance_name,
                integration_params,
                is_byoi,
                prints_manager,
                validate_test=validate_test)
            if failure_message == 'No configuration':
                print_warning(
                    "Warning: skipping {} as it exists in content-test-conf conf.json but not "
                    "in content repo".format(integration_name))
                continue
            if not instance_id:
                print_error(
                    'Failed to create instance of {} with message: {}'.format(
                        integration_name, failure_message))
                failed_integrations.append(
                    "{} {} - devops comments: {}".format(
                        integration_name, product_description,
                        devops_comments))
            else:
                instance_ids.append(instance_id)
                print('Create integration %s succeeded' % (integration_name, ))
                __delete_integrations_instances(c, instance_ids,
                                                prints_manager)

            prints_manager.execute_thread_prints(0)

    return failed_integrations, integrations_counter
Example #6
def get_server_numeric_version(ami_env):
    """
    Gets the current server version
    Arguments:
        ami_env: (str)
            AMI version name.
    Returns:
        (str) Server numeric version
    """
    images_file_name = './Tests/images_data.txt'
    if not os.path.isfile(images_file_name):
        print_warning('Did not find image data file.')
        return '99.99.98'  # latest
    with open(images_file_name, 'r') as image_data_file:
        image_data = [
            line for line in image_data_file if line.startswith(ami_env)
        ]
        if len(image_data) != 1:
            print_warning(
                'Did not get one image data for server version, got {}'.format(
                    image_data))
            return '99.99.98'
        else:
            server_numeric_version = re.findall(
                r'Demisto-Circle-CI-Content-[\w-]+-([\d.]+)-[\d]{5}',
                image_data[0])
            if server_numeric_version:
                server_numeric_version = server_numeric_version[0]
            else:
                server_numeric_version = '99.99.98'  # latest
            print('Server image info: {}'.format(image_data[0]))
            print('Server version: {}'.format(server_numeric_version))
            return server_numeric_version
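
The version itself comes from a single capture group applied to the matching image-data line. A self-contained sketch of that extraction, using a hypothetical line in place of the real ./Tests/images_data.txt content:

import re

# Hypothetical image-data line; the real format comes from the images data file.
sample_line = 'Demisto GA:Demisto-Circle-CI-Content-GA-5.0.0-61063'
match = re.findall(r'Demisto-Circle-CI-Content-[\w-]+-([\d.]+)-[\d]{5}', sample_line)
print(match[0] if match else '99.99.98')  # -> '5.0.0'
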
Example #7
def __test_integration_instance(client, module_instance):
    connection_retries = 3
    response_code = 0
    print_warning("trying to connect.")
    for i in range(connection_retries):
        try:
            response_data, response_code, _ = demisto_client.generic_request_func(self=client, method='POST',
                                                                                  path='/settings/integration/test',
                                                                                  body=module_instance,
                                                                                  _request_timeout=120)
            break
        except ApiException as conn_err:
            print_error(
                'Failed to test integration instance, error trying to communicate with demisto '
                'server: {} '.format(
                    conn_err))
            return False, None
        except urllib3.exceptions.ReadTimeoutError:
            print_warning("Could not connect. Trying to connect for the {} time".format(i + 1))

    if int(response_code) != 200:
        print_error('Integration-instance test ("Test" button) failed.\nBad status code: ' + str(
            response_code))
        return False, None

    result_object = ast.literal_eval(response_data)
    success, failure_message = bool(result_object.get('success')), result_object.get('message')
    return success, failure_message
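
The response body is parsed with ast.literal_eval rather than json.loads, since it arrives as a Python-literal style string. A small, self-contained illustration with an invented response string:

import ast

response_data = "{'success': True, 'message': ''}"
result_object = ast.literal_eval(response_data)
print(bool(result_object.get('success')), result_object.get('message'))  # -> True ''
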
Example #8
    def is_duplicate_description(self):
        """Check if the integration has a non-duplicate description ."""
        is_description_in_yml = False
        is_description_in_package = False
        package_path = None
        md_file_path = None
        if not re.match(INTEGRATION_REGEX, self.file_path, re.IGNORECASE):
            package_path = os.path.dirname(self.file_path)
            try:
                md_file_path = glob.glob(
                    os.path.join(os.path.dirname(self.file_path), '*.md'))[0]
            except IndexError:
                print_warning(
                    "No detailed description file was found in the package {}."
                    " Consider adding one.".format(package_path))
            if md_file_path:
                is_description_in_package = True

        data_dictionary = get_yaml(self.file_path)

        if not data_dictionary:
            return is_description_in_package

        if data_dictionary.get('detaileddescription'):
            is_description_in_yml = True

        if is_description_in_package and is_description_in_yml:
            self._is_valid = False
            print_error(
                "A description was found both in the package and in the yml, "
                "please update the package {}.".format(package_path))
            return False

        return True
Example #9
    def is_valid_fromversion_on_modified(self, change_string=None):
        """Check that the fromversion property was not changed on existing Content files.

        Args:
            change_string (string): the string that indicates the changes done on the file (git diff)

        Returns:
            bool. Whether the file's fromversion has been modified or not.
        """
        if self.is_renamed:
            print_warning(
                "fromversion might have been modified, please make sure it hasn't changed."
            )
            return True

        if not change_string:
            change_string = run_command("git diff HEAD {0}".format(
                self.file_path))

        is_added_from_version = re.search(r"\+([ ]+)?fromversion: .*",
                                          change_string)
        is_added_from_version_secondary = re.search(
            r"\+([ ]+)?\"fromVersion\": .*", change_string)

        if is_added_from_version or is_added_from_version_secondary:
            print_error(
                "You've added fromversion to an existing file in the system, this is not allowed, please undo. "
                "the file was {}.".format(self.file_path))
            self._is_valid = False

        return self._is_valid
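
The check itself is just a regex over the git diff output: an added line (leading '+') that introduces fromversion. A self-contained sketch with a made-up diff hunk:

import re

change_string = (
    ' name: My Integration\n'
    '+fromversion: 4.1.0\n'
)
if re.search(r"\+([ ]+)?fromversion: .*", change_string):
    print('fromversion was added to the file')
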
Example #10
def has_duplicate(id_set, id_to_check):
    duplicates = [duplicate for duplicate in id_set if duplicate.get(id_to_check)]

    if len(duplicates) < 2:
        return False

    for dup1, dup2 in itertools.combinations(duplicates, 2):
        dict1 = list(dup1.values())[0]
        dict2 = list(dup2.values())[0]
        dict1_from_version = LooseVersion(dict1.get('fromversion', '0.0.0'))
        dict2_from_version = LooseVersion(dict2.get('fromversion', '0.0.0'))
        dict1_to_version = LooseVersion(dict1.get('toversion', '99.99.99'))
        dict2_to_version = LooseVersion(dict2.get('toversion', '99.99.99'))

        if dict1['name'] != dict2['name']:
            print_warning('The following objects have the same ID but different names: '
                          '"{}", "{}".'.format(dict1['name'], dict2['name']))

        # A: 3.0.0 - 3.6.0
        # B: 3.5.0 - 4.5.0
        # C: 3.5.2 - 3.5.4
        # D: 4.5.0 - 99.99.99
        if any([
                dict1_from_version <= dict2_from_version < dict1_to_version,  # will catch (B, C), (A, B), (A, C)
                dict1_from_version < dict2_to_version <= dict1_to_version,  # will catch (B, C), (A, C)
                dict2_from_version <= dict1_from_version < dict2_to_version,  # will catch (C, B), (B, A), (C, A)
                dict2_from_version < dict1_to_version <= dict2_to_version,  # will catch (C, B), (C, A)
        ]):
            return True

    return False
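
The overlap test above encodes the usual interval-intersection conditions over LooseVersion ranges; the A-D comments enumerate the cases. A self-contained sketch of the same logic with illustrative ranges:

from distutils.version import LooseVersion

def ranges_overlap(from1, to1, from2, to2):
    f1, t1 = LooseVersion(from1), LooseVersion(to1)
    f2, t2 = LooseVersion(from2), LooseVersion(to2)
    return any([
        f1 <= f2 < t1,  # second range starts inside the first
        f1 < t2 <= t1,  # second range ends inside the first
        f2 <= f1 < t2,  # first range starts inside the second
        f2 < t1 <= t2,  # first range ends inside the second
    ])

print(ranges_overlap('3.0.0', '3.6.0', '3.5.0', '4.5.0'))     # A vs B -> True
print(ranges_overlap('3.0.0', '3.6.0', '4.5.0', '99.99.99'))  # A vs D -> False
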
Example #11
    def is_id_not_modified(self, change_string=None):
        """Check if the ID of the file has been changed.

        Args:
            change_string (string): the string that indicates the changes done on the file (git diff)

        Returns:
            bool. Whether the file's ID has been modified or not.
        """
        if self.is_renamed:
            print_warning(
                "ID might have changed, please make sure to check you have the correct one."
            )
            return True

        if not change_string:
            change_string = run_command("git diff HEAD {}".format(
                self.file_path))

        if re.search("[+-](  )?id: .*", change_string):
            print_error(
                "You've changed the ID of the file {0} please undo.".format(
                    self.file_path))
            self._is_valid = False

        return self._is_valid
Example #12
def create_file_release_notes(change_type, full_file_name):
    """
    Create release note for changed file.

    :param change_type: git change status (A, M, R*)
    :param full_file_name: path to file in repository
    :return: None
    """
    if isinstance(full_file_name, tuple):
        _, full_file_name = full_file_name

    is_pack = is_file_path_in_pack(full_file_name)
    if is_pack:
        file_type = full_file_name.split("/")[2]
    else:
        file_type = full_file_name.split("/")[0]
    base_name = os.path.basename(full_file_name)
    file_suffix = os.path.splitext(base_name)[-1]
    file_type_mapping = RELEASE_NOTE_GENERATOR.get(file_type)

    if file_type_mapping is None or file_suffix not in CONTENT_FILE_SUFFIXES:
        print_warning("Unsupported file type: {}".format(full_file_name))
        return

    if change_type != "R100":  # only file name has changed (no actual data was modified
        if 'R' in change_type:
            # handle the same as modified
            change_type = 'M'

        file_type_mapping.add(change_type, CONTENT_LIB_PATH + full_file_name)
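
The file type driving the RELEASE_NOTE_GENERATOR lookup is simply a path segment: the third segment for files under Packs, the first otherwise. A small sketch with made-up paths:

import os

pack_file = 'Packs/MyPack/Integrations/MyIntegration/MyIntegration.yml'
legacy_file = 'Scripts/script-MyScript.yml'
print(pack_file.split('/')[2])    # -> 'Integrations'
print(legacy_file.split('/')[0])  # -> 'Scripts'
print(os.path.splitext(os.path.basename(pack_file))[-1])  # -> '.yml'
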
Example #13
def create_file_release_notes(change_type, full_file_name, deleted_data):
    """
    Create release note for changed file.

    :param change_type: git change status (A, M, D, R*)
    :param full_file_name: path to file in repository
    :param deleted_data: all removed files content
    :return: None
    """
    if isinstance(full_file_name, tuple):
        old_file_path, full_file_name = full_file_name

    file_type = full_file_name.split("/")[0]
    base_name = os.path.basename(full_file_name)
    file_suffix = os.path.splitext(base_name)[-1]
    file_type_mapping = release_note_generator.get(file_type)

    if file_type_mapping is None or file_suffix not in CONTENT_FILE_SUFFIXES:
        print_warning("Unsupported file type: {}".format(full_file_name))
        return

    if change_type == "D":
        handle_deleted_file(deleted_data, full_file_name)
    elif change_type != "R100":  # only file name has changed (no actual data was modified
        if 'R' in change_type:
            # handle the same as modified
            change_type = 'M'

        file_type_mapping.add(change_type, contentLibPath + full_file_name)
Example #14
def is_correct_content_installed(ips, content_version, api_key):
    # type: (List[List], AnyStr, AnyStr) -> bool
    """ Checks if specific content version is installed on server list

    Args:
        ips: list with lists of [instance_name, instance_ip]
        content_version: content version that should be installed
        api_key: the demisto api key to create an api client with.

    Returns:
        True if the content version was verified on all instances, False on the first failure.
    """

    for ami_instance_name, ami_instance_ip in ips:
        host = "https://{}".format(ami_instance_ip)

        client = demisto_client.configure(base_url=host, api_key=api_key, verify_ssl=False)
        resp_json = None
        try:
            try:
                resp = demisto_client.generic_request_func(self=client, path='/content/installed/',
                                                           method='POST', accept='application/json',
                                                           content_type='application/json')
                try:
                    resp_json = ast.literal_eval(resp[0])
                except ValueError as err:
                    print_error(
                        'failed to parse response from demisto. response is {}.\nError:\n{}'.format(resp[0], err))
                    return False
            except ApiException as err:
                print(err)

            if not isinstance(resp_json, dict):
                raise ValueError('Response from server is not a Dict, got [{}].\n'
                                 'Text: {}'.format(type(resp_json), resp_json))
            release = resp_json.get("release")
            notes = resp_json.get("releaseNotes")
            installed = resp_json.get("installed")
            if not (release and content_version in release and notes and installed):
                if is_release_branch():
                    print_warning('On a release branch - ignoring content mismatch.')
                else:
                    print_error("Failed install content on instance [{}]\nfound content version [{}], expected [{}]"
                                "".format(ami_instance_name, release, content_version))
                    return False
            else:
                print_color("Instance [{instance_name}] content verified with version [{content_version}]".format(
                    instance_name=ami_instance_name, content_version=release),
                    LOG_COLORS.GREEN
                )
        except ValueError as exception:
            err_msg = "Failed to verify content version on server [{}]\n" \
                      "Error: [{}]\n".format(ami_instance_name, str(exception))
            if resp_json is not None:
                err_msg += "Server response: {}".format(resp_json)
            print_error(err_msg)
            return False
    return True
Example #15
    def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket,
                          override_pack):
        """ Manages the upload of pack zip artifact to correct path in cloud storage.
        The zip pack will be uploaded to following path: /content/packs/pack_name/pack_latest_version.
        If the zip pack artifact already exists at the constructed path, the upload is skipped.
        If the override_pack flag is set to True, the pack upload is forced.

        Args:
            zip_pack_path (str): full path to pack zip artifact.
            latest_version (str): pack latest version.
            storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
            override_pack (bool): whether to override existing pack.

        Returns:
            bool: whether the operation succeeded.
            bool: True if the pack already exists at the targeted path and the upload was skipped, False otherwise.

        """
        task_status = True

        try:
            version_pack_path = os.path.join(STORAGE_BASE_PATH,
                                             self._pack_name, latest_version)
            existing_files = [
                f.name
                for f in storage_bucket.list_blobs(prefix=version_pack_path)
            ]

            if existing_files and not override_pack:
                print_warning(
                    f"The following packs already exist at storage: {', '.join(existing_files)}"
                )
                print_warning(
                    f"Skipping step of uploading {self._pack_name}.zip to storage."
                )
                return task_status, True

            pack_full_path = f"{version_pack_path}/{self._pack_name}.zip"
            blob = storage_bucket.blob(pack_full_path)

            with open(zip_pack_path, "rb") as pack_zip:
                blob.upload_from_file(pack_zip)

            self.relative_storage_path = blob.name
            print_color(
                f"Uploaded {self._pack_name} pack to {pack_full_path} path.",
                LOG_COLORS.GREEN)

            return task_status, False
        except Exception as e:
            task_status = False
            print_error(
                f"Failed in uploading {self._pack_name} pack to gcs.\nAdditional info: {e}"
            )
            return task_status, True
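
The destination blob name is just the base path, pack name and version joined together. A tiny sketch of that path construction (the STORAGE_BASE_PATH value here is assumed for illustration, not taken from the module):

import os

STORAGE_BASE_PATH = 'content/packs'  # assumed value for illustration
version_pack_path = os.path.join(STORAGE_BASE_PATH, 'MyPack', '1.0.1')
pack_full_path = f"{version_pack_path}/MyPack.zip"
print(pack_full_path)  # -> 'content/packs/MyPack/1.0.1/MyPack.zip' (on POSIX paths)
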
Example #16
def update_content_version(
        content_ver: str,
        path: str = './Scripts/CommonServerPython/CommonServerPython.py'):
    regex = r'CONTENT_RELEASE_VERSION = .*'
    try:
        with open(path, 'r+') as f:
            content = f.read()
            content = re.sub(regex,
                             f"CONTENT_RELEASE_VERSION = '{content_ver}'",
                             content, flags=re.M)
            f.seek(0)
            f.write(content)
    except Exception as ex:
        print_warning(f'Could not open CommonServerPython File - {ex}')
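
The substitution itself is a one-line re.sub over the file contents. A self-contained sketch run against an in-memory string instead of CommonServerPython.py:

import re

content = "CONTENT_RELEASE_VERSION = '0.0.0'\nCONTENT_BRANCH_NAME = 'master'\n"
content = re.sub(r'CONTENT_RELEASE_VERSION = .*',
                 "CONTENT_RELEASE_VERSION = '20.5.0'", content, flags=re.M)
print(content)  # the version line now reads CONTENT_RELEASE_VERSION = '20.5.0'
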
Example #17
def report_tests_status(preupdate_fails, postupdate_fails,
                        new_integrations_names):
    """Prints errors and/or warnings if there are any and returns whether whether testing was successful or not.

    Args:
        preupdate_fails (set): List of tuples of integrations that failed the "Test" button prior to content
            being updated on the demisto instance where each tuple is comprised of the integration name and the
            name of the instance that was configured for that integration which failed.
        postupdate_fails (set): List of tuples of integrations that failed the "Test" button after content was
            updated on the demisto instance where each tuple is comprised of the integration name and the name
            of the instance that was configured for that integration which failed.
        new_integrations_names (list): List of the names of integrations that are new since the last official
            content release and that will only be present on the demisto instance after the content update is
            performed.

    Returns:
        (bool): False if there were integration instances that succeeded prior to the content update and then
            failed after content was updated, otherwise True.
    """
    testing_status = True
    failed_pre_and_post = preupdate_fails.intersection(postupdate_fails)
    mismatched_statuses = preupdate_fails.symmetric_difference(
        postupdate_fails)
    failed_only_after_update = []
    failed_but_is_new = []
    for instance_name, integration_of_instance in mismatched_statuses:
        if integration_of_instance in new_integrations_names:
            failed_but_is_new.append((instance_name, integration_of_instance))
        else:
            failed_only_after_update.append(
                (instance_name, integration_of_instance))

    # warnings but won't fail the build step
    if failed_but_is_new:
        print_warning('New Integrations ("Test" Button) Failures')
        for instance_name, integration_of_instance in failed_but_is_new:
            print_warning('Integration: "{}", Instance: "{}"'.format(
                integration_of_instance, instance_name))
    if failed_pre_and_post:
        failure_category = '\nIntegration instances that had ("Test" Button) failures' \
                           ' both before and after the content update'
        print_warning(failure_category)
        for instance_name, integration_of_instance in failed_pre_and_post:
            print_warning('Integration: "{}", Instance: "{}"'.format(
                integration_of_instance, instance_name))

    # fail the step if there are instances that only failed after content was updated
    if failed_only_after_update:
        testing_status = False
        failure_category = '\nIntegration instances that had ("Test" Button) failures' \
                           ' only after content was updated. This indicates that your' \
                           ' updates introduced breaking changes to the integration.'
        print_error(failure_category)
        for instance_name, integration_of_instance in failed_only_after_update:
            print_error('Integration: "{}", Instance: "{}"'.format(
                integration_of_instance, instance_name))

    return testing_status
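
The classification relies on plain set algebra: the intersection is what failed both before and after, while the symmetric difference is what changed status. A small illustration with invented instance and integration names:

pre_fails = {('inst_a', 'IntegrationA'), ('inst_b', 'IntegrationB')}
post_fails = {('inst_b', 'IntegrationB'), ('inst_c', 'IntegrationC')}

print(pre_fails.intersection(post_fails))          # failed before and after the update
print(pre_fails.symmetric_difference(post_fails))  # failed only on one side of the update
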
Example #18
    def is_docker_image_changed(self):
        """Check if the docker image as been changed."""
        # Checking the docker image is only needed below server version 5.0
        if server_version_compare(self.old_script.get('fromversion', '0'), '5.0.0') < 0:
            old_docker = get_dockerimage45(self.old_script)
            new_docker = get_dockerimage45(self.current_script)
            if old_docker != new_docker and new_docker:
                print_error("Possible backwards compatibility break, You've changed the docker for the file {}"
                            " this is not allowed. Old: {}. New: {}".format(self.file_path, old_docker, new_docker))
                return True
            elif old_docker != new_docker and not new_docker:
                print_warning("Possible backwards compatibility break. You've removed "
                              "the docker image for the file {0}, make sure this isn't a mistake.  "
                              "Old image: {1}".format(self.file_path, old_docker))
                return False

        return False
Example #19
def main():
    ready_ami_list = []
    with open('./Tests/instance_ips.txt', 'r') as instance_file:
        instance_ips = instance_file.readlines()
        instance_ips = [line.strip('\n').split(":") for line in instance_ips]

    loop_start_time = time.time()
    last_update_time = loop_start_time
    instance_ips_not_created = [
        ami_instance_ip for ami_instance_name, ami_instance_ip in instance_ips
    ]

    while len(instance_ips_not_created) > 0:
        current_time = time.time()
        exit_if_timed_out(loop_start_time, current_time)

        for ami_instance_name, ami_instance_ip in instance_ips:
            if ami_instance_ip in instance_ips_not_created:
                host = "https://{}".format(ami_instance_ip)
                path = '/health'
                method = 'GET'
                try:
                    res = requests.request(method=method,
                                           url=(host + path),
                                           verify=False)
                except Exception as e:
                    print_warning(
                        'Encountered error: {}\nWill retry this step later.'.
                        format(str(e)))
                    continue
                if res.status_code == 200:
                    print("[{}] {} is ready to use".format(
                        datetime.datetime.now(), ami_instance_name))
                    # ready_ami_list.append(ami_instance_name)
                    instance_ips_not_created.remove(ami_instance_ip)
                # print the status message once every PRINT_INTERVAL_IN_SECONDS
                elif current_time - last_update_time > PRINT_INTERVAL_IN_SECONDS:
                    print(
                        "{} at ip {} is not ready yet - waiting for it to start"
                        .format(ami_instance_name, ami_instance_ip))

        if current_time - last_update_time > PRINT_INTERVAL_IN_SECONDS:
            # The interval has passed, which means we printed a status update.
            last_update_time = current_time
        if len(instance_ips) > len(ready_ami_list):
            sleep(1)
Example #20
    def validate_file_release_notes(self):
        """Validate that the file has proper release notes when modified.

        This function updates the class attribute self._is_valid instead of passing it back and forth.
        """
        if self.is_renamed:
            print_warning("You might need RN please make sure to check that.")
            return

        if os.path.isfile(self.file_path):
            rn_path = get_release_notes_file_path(self.file_path)
            # check rn file exists and contain text
            if not os.path.isfile(rn_path) or os.stat(rn_path).st_size == 0:
                print_error(
                    'File {} is missing releaseNotes, please add.'.format(
                        self.file_path))
                self._is_valid = False
Example #21
def should_clear(file_path, current_server_version="0.0.0"):
    """
    scan folder and remove all references to release notes
    :param file_path: path of the yml/json file
    :param current_server_version: current server version
    """
    data = get_file_data(file_path)

    version = data.get('fromversion') or data.get('fromVersion')
    if version and server_version_compare(current_server_version,
                                          str(version)) < 0:
        print_warning(
            'keeping release notes for ({})\nto be published on {} version release'
            .format(file_path, version))
        return False

    return True
Example #22
def get_release_notes_draft(github_token):
    # Disable insecure warnings
    requests.packages.urllib3.disable_warnings()

    res = requests.get(
        'https://api.github.com/repos/demisto/content/releases',
        headers={'Authorization': 'token {}'.format(github_token)})
    drafts = [release for release in res.json() if release.get('draft', False)]
    if drafts:
        if len(drafts) == 1:
            return drafts[0]['body']
        else:
            print_warning(
                'Too many drafts to choose from ({}), skipping update.'.format(
                    len(drafts)))

    return ''
Example #23
def update_branch(
        path: str = './Scripts/CommonServerPython/CommonServerPython.py'):

    regex = r'CONTENT_BRANCH_NAME = .*'
    branches = run_command('git branch')
    branch_name_reg = re.search(r'\* (.*)', branches)
    branch_name = branch_name_reg.group(1)
    try:
        with open(path, 'r+') as f:
            content = f.read()
            content = re.sub(regex, f"CONTENT_BRANCH_NAME = '{branch_name}'",
                             content, re.M)
            f.seek(0)
            f.write(content)
    except Exception as ex:
        print_warning(f'Could not open CommonServerPython File - {ex}')

    return branch_name
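
The current branch is taken from the starred line in `git branch` output. A minimal sketch with a stand-in string for the command output:

import re

branches = '  master\n* my-feature-branch\n  another-branch\n'
branch_name = re.search(r'\* (.*)', branches).group(1)
print(branch_name)  # -> 'my-feature-branch'
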
Example #24
def should_clear(file_path, current_server_version="0.0.0"):
    """
    scan folder and remove all references to release notes
    :param file_path: path of the yml/json file
    :param current_server_version: current server version
    """
    extension = os.path.splitext(file_path)[1]
    if extension not in FILE_TYPE_DICT:
        return False

    load_function = FILE_TYPE_DICT[extension]
    with open(file_path, 'r') as f:
        data = load_function(f)

    v = data.get('fromversion') or data.get('fromVersion')
    if v and server_version_compare(current_server_version, str(v)) < 0:
        print_warning('keeping release notes for ({})\nto be published on {} version release'.format(file_path, str(v)))
        return False

    return True
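
The decision hinges on server_version_compare returning a negative value when its first argument is the lower version. A hedged stand-in using LooseVersion shows the intended comparison; the real helper may differ in details:

from distutils.version import LooseVersion

def server_version_compare(v1, v2):
    # Stand-in: negative if v1 < v2, zero if equal, positive if v1 > v2.
    return (LooseVersion(v1) > LooseVersion(v2)) - (LooseVersion(v1) < LooseVersion(v2))

print(server_version_compare('5.0.0', '5.5.0') < 0)  # -> True: keep the release notes
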
Example #25
def get_test_list(files_string, branch_name):
    """Create a test list that should run"""
    modified_files, modified_tests_list, all_tests, is_conf_json, run_sample_tests = get_modified_files(
        files_string)

    tests = set([])
    if modified_files:
        tests = find_tests_for_modified_files(modified_files)

    for file_path in modified_tests_list:
        test = collect_ids(file_path)
        if test not in tests:
            tests.add(test)

    if is_conf_json:
        tests = tests.union(get_test_from_conf(branch_name))

    if all_tests:
        print_warning('Running all tests due to: {}'.format(
            ','.join(all_tests)))
        tests.add("Run all tests")

    if run_sample_tests:  # Choosing 3 random tests for infrastructure testing
        test_ids = get_test_ids(check_nightly_status=True)
        for _ in range(3):
            tests.add(random.choice(test_ids))

    if not tests:
        if modified_files or modified_tests_list or all_tests:
            print_error(
                "There are no tests that check the changes you've done, please make sure you write one"
            )
            sys.exit(1)
        else:
            print_warning("Running Sanity cehck only")
            tests.add('DocumentationTest')  # test with integration configured
            tests.add(
                'TestCommonPython')  # test with no integration configured

    return tests
Example #26
def main():
    circle_artifact = sys.argv[1]
    envfile = sys.argv[2]

    with open(envfile, 'r') as json_file:
        env_results = json.load(json_file)

    for env in env_results:
        if not os.path.isfile("./Tests/is_build_failed_{}.txt".format(
                env["Role"].replace(' ', ''))):
            ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
                         '"sudo chmod -R 755 /var/log/demisto"'
            scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
                         '{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'

            try:
                subprocess.check_output(ssh_string.format(
                    env["SSHuser"], env["InstanceDNS"]),
                                        shell=True)

            except subprocess.CalledProcessError as exc:
                print(exc.output)

            try:
                subprocess.check_output(scp_string.format(
                    env["SSHuser"], env["InstanceDNS"],
                    "{}/server_{}.log".format(circle_aritfact,
                                              env["Role"].replace(' ', ''))),
                                        shell=True)

            except subprocess.CalledProcessError as exc:
                print(exc.output)

            rminstance = aws_functions.destroy_instance(
                env["Region"], env["InstanceID"])
            if aws_functions.isError(rminstance):
                print_error(rminstance)
        else:
            print_warning("Tests failed on {} ,keeping instance alive".format(
                env["Role"]))
Example #27
def configure_integration_instance(integration, client, prints_manager):
    """
    Configure an instance for an integration

    Arguments:
        integration: (dict)
            Integration object whose params key-values are set
        client: (demisto_client)
            The client to connect to
        prints_manager: (ParallelPrintsManager)
            Print manager object

    Returns:
        (dict): Configured integration instance
    """
    integration_name = integration.get('name')
    print(
        'Configuring instance for integration "{}"\n'.format(integration_name))
    integration_instance_name = integration.get('instance_name', '')
    integration_params = integration.get('params')
    is_byoi = integration.get('byoi', True)
    validate_test = integration.get('validate_test', True)

    integration_configuration = __get_integration_config(
        client, integration_name, prints_manager)
    prints_manager.execute_thread_prints(0)
    if not integration_configuration:
        return None

    # In the integration configuration in content-test-conf conf.json, the test_validate flag was set to false
    if not validate_test:
        print_warning(
            "Skipping configuration for integration: {} (it has test_validate set to false)"
            .format(integration_name))
        return None
    module_instance = set_integration_instance_parameters(
        integration_configuration, integration_params,
        integration_instance_name, is_byoi)
    return module_instance
Example #28
def print_packs_summary(packs_list):
    """Prints summary of packs uploaded to gcs.

    Args:
        packs_list (list): list of initialized packs.

    """
    successful_packs = [
        pack for pack in packs_list if pack.status == PackStatus.SUCCESS.name
    ]
    skipped_packs = [
        pack for pack in packs_list
        if pack.status == PackStatus.PACK_ALREADY_EXISTS.name
    ]
    failed_packs = [
        pack for pack in packs_list
        if pack not in successful_packs and pack not in skipped_packs
    ]

    print("\n")
    print(
        "--------------------------------------- Packs Upload Summary ---------------------------------------"
    )
    print(f"Total number of packs: {len(packs_list)}")

    if successful_packs:
        print_color(
            f"Number of successful uploaded packs: {len(successful_packs)}",
            LOG_COLORS.GREEN)
        successful_packs_table = _build_summary_table(successful_packs)
        print_color(successful_packs_table, LOG_COLORS.GREEN)
    if skipped_packs:
        print_warning(f"Number of skipped packs: {len(skipped_packs)}")
        skipped_packs_table = _build_summary_table(skipped_packs)
        print_warning(skipped_packs_table)
    if failed_packs:
        print_error(f"Number of failed packs: {len(failed_packs)}")
        failed_packs_table = _build_summary_table(failed_packs)
        print_error(failed_packs_table)
Example #29
def main():
    """Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.

    This script runs both in a local and a remote environment. In a local environment we don't have any
    logger assigned, and then pykwalify raises an error, since it is logging the validation results.
    Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non-critical log messages.
    """
    branches = run_command('git branch')
    branch_name_reg = re.search(r'\* (.*)', branches)
    branch_name = branch_name_reg.group(1)

    parser = argparse.ArgumentParser(description='Utility CircleCI usage')
    parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
    parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
    parser.add_argument('-t', '--test-filter', type=str2bool, default=False, help='Check that tests are valid.')
    parser.add_argument('-p', '--prev-ver', help='Previous branch or SHA1 commit to run checks against.')
    options = parser.parse_args()
    is_circle = options.circle
    is_backward_check = options.backwardComp

    logging.basicConfig(level=logging.CRITICAL)

    print_color('Starting validating files structure', LOG_COLORS.GREEN)
    files_validator = FilesValidator(is_circle, print_ignored_files=True)
    if not files_validator.is_valid_structure(branch_name, is_backward_check=is_backward_check,
                                              prev_ver=options.prev_ver):
        sys.exit(1)
    if options.test_filter:
        try:
            print_warning('Updating idset. Be patient if this is the first time...')
            subprocess.check_output(['./Tests/scripts/update_id_set.py'])
            print_warning('Checking that we have tests for all content...')
            try:
                tests_out = subprocess.check_output(['./Tests/scripts/configure_tests.py', '-s', 'true'],
                                                    stderr=subprocess.STDOUT)
                print(tests_out)
            except Exception:
                print_warning('Recreating idset to be sure that configure tests failure is accurate.'
                              ' Be patient this can take 15-20 seconds ...')
                subprocess.check_output(['./Tests/scripts/update_id_set.py', '-r'])
                print_warning('Checking that we have tests for all content again...')
                subprocess.check_call(['./Tests/scripts/configure_tests.py', '-s', 'true'])
        except Exception as ex:
            print_error('Failed validating tests: {}'.format(ex))
            sys.exit(1)
    print_color('Finished validating files structure', LOG_COLORS.GREEN)
    sys.exit(0)
Example #30
def get_release_notes_draft(github_token, asset_id):
    """
    if possible, download current release draft from content repository in github.

    :param github_token: github token with push permission (in order to get the draft).
    :param asset_id: content build's asset id.
    :return: draft text (or empty string on error).
    """
    if github_token is None:
        print_warning('unable to download draft without github token.')
        return ''

    # Disable insecure warnings
    requests.packages.urllib3.disable_warnings()  # pylint: disable=no-member

    try:
        res = requests.get(
            'https://api.github.com/repos/demisto/content/releases',
            verify=False,  # guardrails-disable-line
            headers={'Authorization': 'token {}'.format(github_token)})
    except requests.exceptions.ConnectionError as exc:
        print_warning('unable to get release draft, reason:\n{}'.format(
            str(exc)))
        return ''

    if res.status_code != 200:
        print_warning('unable to get release draft ({}), reason:\n{}'.format(
            res.status_code, res.text))
        return ''

    drafts = [release for release in res.json() if release.get('draft', False)]
    if drafts:
        if len(drafts) == 1:
            draft_body = drafts[0]['body']
            raw_asset = re.findall(
                r'Release Notes for version .* \((\d{5,}|xxxxx)\)', draft_body,
                re.IGNORECASE)
            if raw_asset:
                draft_body = draft_body.replace(raw_asset[0], asset_id)
            return draft_body

        print_warning(
            'Too many drafts to choose from ({}), skipping update.'.format(
                len(drafts)))

    return ''
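
Here the draft body's asset placeholder (either the literal xxxxx or a previous numeric id) is located by regex and swapped for the current asset id. A self-contained sketch with an invented draft body:

import re

draft_body = 'Release Notes for version 20.5.0 (xxxxx)'
raw_asset = re.findall(r'Release Notes for version .* \((\d{5,}|xxxxx)\)',
                       draft_body, re.IGNORECASE)
if raw_asset:
    draft_body = draft_body.replace(raw_asset[0], '123456')
print(draft_body)  # -> 'Release Notes for version 20.5.0 (123456)'
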
Example #31
def get_test_list(files_string, branch_name):
    """Create a test list that should run"""
    modified_files, modified_tests_list, all_tests, is_conf_json, sample_tests = get_modified_files(files_string)

    tests = set([])
    if modified_files:
        tests = find_tests_for_modified_files(modified_files)

    for file_path in modified_tests_list:
        test = collect_ids(file_path)
        if test not in tests:
            tests.add(test)

    if is_conf_json:
        tests = tests.union(get_test_from_conf(branch_name))

    if all_tests:
        print_warning('Running all tests due to: {}'.format(','.join(all_tests)))
        tests.add("Run all tests")

    if sample_tests:  # Choosing 3 random tests for infrastructure testing
        print_warning('Running sample tests due to: {}'.format(','.join(sample_tests)))
        test_ids = get_test_ids(check_nightly_status=True)
        for _ in range(3):
            tests.add(random.choice(test_ids))

    if not tests:
        if modified_files or modified_tests_list or all_tests:
            print_error("There are no tests that check the changes you've done, please make sure you write one")
            sys.exit(1)
        else:
            print_warning("Running Sanity check only")
            tests.add('DocumentationTest')  # test with integration configured
            tests.add('TestCommonPython')  # test with no integration configured

    return tests