Example #1
 def __init__(self, jh, settings=None):
     self.hostname = jh['hostname']
     self.user = jh['user']
     self.port = 22 if jh['port'] is None else jh['port']
     secret_reader = SecretReader(settings=settings)
     self.identity = secret_reader.read(jh['identity'])
     self.init_identity_file()
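A minimal usage sketch for this constructor; the class name, the shape of the jh mapping, and the secret path are assumptions for illustration, inferred from the keys read above:

# hypothetical illustration -- names and values are assumed
settings = queries.get_app_interface_settings()
jh = {
    'hostname': 'bastion.example.com',
    'user': 'app-sre-bot',
    'port': None,  # __init__ falls back to 22
    'identity': {'path': 'app-sre/bastion', 'field': 'identity'},
}
jump_host = JumpHost(jh, settings=settings)  # class name assumed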
Example #2
 def __init__(self,
              instance,
              project_id=None,
              ssl_verify=True,
              settings=None,
              project_url=None,
              saas_files=None):
     self.server = instance['url']
     secret_reader = SecretReader(settings=settings)
     token = secret_reader.read(instance['token'])
     ssl_verify = instance['sslVerify']
     if ssl_verify is None:
         ssl_verify = True
     self.gl = gitlab.Gitlab(self.server,
                             private_token=token,
                             ssl_verify=ssl_verify)
     self.gl.auth()
     self.user = self.gl.user
     if project_id is None:
          # When project_id is not provided, we try to get the project
         # using the project_url
         if project_url is not None:
             parsed_project_url = urlparse(project_url)
             name_with_namespace = parsed_project_url.path.strip('/')
             self.project = self.gl.projects.get(name_with_namespace)
     else:
         self.project = self.gl.projects.get(project_id)
     self.saas_files = saas_files
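Note the fallback order in the project lookup above: an explicit project_id wins; otherwise the path of project_url is stripped into GitLab's name-with-namespace form, which python-gitlab's projects.get() also accepts. The parsing step can be checked in isolation:

from urllib.parse import urlparse

parsed = urlparse('https://gitlab.example.com/group/subgroup/project')
# 'group/subgroup/project' is the name-with-namespace form
assert parsed.path.strip('/') == 'group/subgroup/project'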
Example #3
 def __init__(self, instance, repo_url, settings):
     parsed_repo_url = urlparse(repo_url)
     repo = parsed_repo_url.path.strip('/')
     secret_reader = SecretReader(settings=settings)
     token = secret_reader.read(instance['token'])
     git_cli = github.Github(token, base_url=GH_BASE_URL)
     self.repo = git_cli.get_repo(repo)
Example #4
    def init_ocm_client(self, cluster_info):
        """
        Initiate an OCM client.
        Gets the OCM information and initiates an OCM client.
        Skips initiating OCM if it has already been initialized or if
        the current integration is disabled on it.

        :param cluster_info: Graphql cluster query result

        :type cluster_info: dict
        """
        if self.cluster_disabled(cluster_info):
            return
        cluster_name = cluster_info['name']
        ocm_info = cluster_info['ocm']
        ocm_name = ocm_info['name']
        # pointer from each cluster to its referenced OCM instance
        self.clusters_map[cluster_name] = ocm_name
        if self.ocm_map.get(ocm_name):
            return

        access_token_client_id = ocm_info.get('accessTokenClientId')
        access_token_url = ocm_info.get('accessTokenUrl')
        ocm_offline_token = ocm_info.get('offlineToken')
        if ocm_offline_token is None:
            self.ocm_map[ocm_name] = False
        else:
            url = ocm_info['url']
            secret_reader = SecretReader(settings=self.settings)
            token = secret_reader.read(ocm_offline_token)
            self.ocm_map[ocm_name] = \
                OCM(url, access_token_client_id, access_token_url, token)
Example #5
def run(dry_run):
    settings = queries.get_app_interface_settings()
    gqlapi = gql.get_api()
    github = init_github()
    secret_reader = SecretReader(settings=settings)
    # Reconcile against all sentry instances
    result = gqlapi.query(SENTRY_INSTANCES_QUERY)
    for instance in result['instances']:
        token = secret_reader.read(instance['automationToken'])
        host = instance['consoleUrl']
        sentry_client = SentryClient(host, token)

        skip_user = secret_reader.read(instance['adminUser'])
        current_state = fetch_current_state(sentry_client, [skip_user])
        desired_state = fetch_desired_state(gqlapi, instance, github)

        reconciler = SentryReconciler(sentry_client, dry_run)
        reconciler.reconcile(current_state, desired_state)
Example #6
    def __init__(self, token, ssl_verify=True, settings=None):
        secret_reader = SecretReader(settings=settings)
        token_config = secret_reader.read(token)
        config = toml.loads(token_config)

        self.url = config['jenkins']['url']
        self.user = config['jenkins']['user']
        self.password = config['jenkins']['password']
        self.ssl_verify = ssl_verify
        self.should_restart = False
        self.settings = settings
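The token secret here is a whole TOML document rather than a bare string. Judging from the keys accessed above, it would look roughly like the following; the values are placeholders and the exact layout is an assumption:

import toml

token_config = """
[jenkins]
url = "https://jenkins.example.com"
user = "admin"
password = "s3cr3t"
"""
config = toml.loads(token_config)
assert config['jenkins']['user'] == 'admin'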
Example #7
def bot_login(ctx, cluster_name):
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c['name'] == cluster_name]
    if len(clusters) == 0:
        print(f"{cluster_name} not found.")
        sys.exit(1)

    cluster = clusters[0]
    server = cluster['serverUrl']
    token = secret_reader.read(cluster['automationToken'])
    print(f"oc login --server {server} --token {token}")
Example #8
 def __init__(self,
              workspace_name,
              token,
              settings=None,
              init_usergroups=True,
              **chat_kwargs):
     self.workspace_name = workspace_name
     secret_reader = SecretReader(settings=settings)
     slack_token = secret_reader.read(token)
     self.sc = SlackClient(slack_token)
     self.results = {}
     self.chat_kwargs = chat_kwargs
     if init_usergroups:
         self._initiate_usergroups()
Example #9
def get_config(desired_org_name=None):
    gqlapi = gql.get_api()
    orgs = gqlapi.query(ORGS_QUERY)['orgs']
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    config = {'github': {}}
    for org in orgs:
        org_name = org['name']
        if desired_org_name and org_name != desired_org_name:
            continue
        token = secret_reader.read(org['token'])
        org_config = {'token': token, 'managed_teams': org['managedTeams']}
        config['github'][org_name] = org_config

    return config
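For reference, the returned mapping is keyed by organization name; a sketch of its shape, with placeholder values:

config = {
    'github': {
        'my-org': {
            'token': '<value read by SecretReader>',
            'managed_teams': ['team-a', 'team-b'],
        },
    },
}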
Example #10
def get_encrypted_credentials(credentials_name, user, settings, smtp_client):
    credentials_map = settings['credentials']
    credentials_map_item = \
        [c for c in credentials_map if c['name'] == credentials_name]
    if len(credentials_map_item) != 1:
        return None
    secret = credentials_map_item[0]['secret']
    secret_reader = SecretReader(settings=settings)
    credentials = secret_reader.read(secret)
    recipient = smtp_client.get_recipient(user['org_username'])
    public_gpg_key = user['public_gpg_key']
    encrypted_credentials = \
        gpg_encrypt(credentials, recipient, public_gpg_key)

    return encrypted_credentials
Example #11
    def init_oc_client(self, cluster_info):
        cluster = cluster_info['name']
        if self.oc_map.get(cluster):
            return None
        if self.cluster_disabled(cluster_info):
            return None
        if self.internal is not None:
            # integration is executed with `--internal` or `--external`
            # filter out non-matching clusters
            if self.internal and not cluster_info['internal']:
                return
            if not self.internal and cluster_info['internal']:
                return

        automation_token = cluster_info.get('automationToken')
        if automation_token is None:
            self.set_oc(
                cluster,
                OCLogMsg(log_level=logging.ERROR,
                         message=f"[{cluster}]"
                         " has no automation token"))
        else:
            server_url = cluster_info['serverUrl']
            secret_reader = SecretReader(settings=self.settings)
            token = secret_reader.read(automation_token)
            if self.use_jump_host:
                jump_host = cluster_info.get('jumpHost')
            else:
                jump_host = None
            try:
                oc_client = OC(server_url,
                               token,
                               jump_host,
                               settings=self.settings,
                               init_projects=self.init_projects,
                               init_api_resources=self.init_api_resources)
                self.set_oc(cluster, oc_client)
            except StatusCodeError:
                self.set_oc(
                    cluster,
                    OCLogMsg(log_level=logging.ERROR,
                             message=f"[{cluster}]"
                             " is unreachable"))
Example #12
def promquery(cluster, query):
    """Run a PromQL query"""

    config_data = config.get_config()
    auth = {
        'path': config_data['promql-auth']['secret_path'],
        'field': 'token'
    }
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    prom_auth_creds = secret_reader.read(auth)
    prom_auth = requests.auth.HTTPBasicAuth(*prom_auth_creds.split(':'))

    url = f"https://prometheus.{cluster}.devshift.net/api/v1/query"

    response = requests.get(url, params={'query': query}, auth=prom_auth)
    response.raise_for_status()

    print(json.dumps(response.json(), indent=4))
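The auth dict built above follows the secret reference convention seen throughout these examples: a mapping with 'path' and 'field' keys that SecretReader.read() resolves to a single value. A minimal sketch, assuming a Vault-style backend; the path below is a placeholder:

settings = queries.get_app_interface_settings()
secret_reader = SecretReader(settings=settings)
secret = {'path': 'app-sre/creds/promql-auth', 'field': 'token'}
token = secret_reader.read(secret)  # the single field, as a string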
Example #13
def get_quay_api_store():
    """
    Returns a dictionary with a key for each Quay organization
    managed in app-interface.
    Each key contains an initiated QuayApi instance.
    """
    quay_orgs = queries.get_quay_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    store = {}
    for org_data in quay_orgs:
        name = org_data['name']
        server_url = org_data.get('serverUrl')
        token = secret_reader.read(org_data['automationToken'])
        store[name] = {
            'api': QuayApi(token, name, base_url=server_url),
            'teams': org_data.get('managedTeams')
        }

    return store
Example #14
def run(dry_run):
    base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
    orgs = queries.get_github_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    error = False
    for org in orgs:
        org_name = org['name']
        token = secret_reader.read(org['token'])
        gh = Github(token, base_url=base_url)
        gh_org = gh.get_organization(org_name)

        current_2fa = gh_org.two_factor_requirement_enabled
        desired_2fa = org['two_factor_authentication'] or False
        if current_2fa != desired_2fa:
            logging.error(f"2FA mismatch for {org_name}")
            error = True

    if error:
        sys.exit(1)
Example #15
def run(dry_run):
    gqlapi = gql.get_api()
    result = gqlapi.query(REPOS_QUERY)
    config = get_config()['github-repo-invites']
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret = {'path': config['secret_path'],
              'field': config['secret_field']}
    token = secret_reader.read(secret)
    g = utils.raw_github_api.RawGithubApi(token)

    urls = set()
    known_orgs = set()
    for app in result['apps_v1']:
        code_components = app['codeComponents']

        if code_components is None:
            continue

        for code_component in app['codeComponents']:
            url = code_component['url']
            urls.add(url)
            org = url[:url.rindex('/')]
            known_orgs.add(org)

    for i in g.repo_invitations():
        invitation_id = i['id']
        invitation_url = i['html_url']

        url = os.path.dirname(invitation_url)

        accept = url in urls or any(url.startswith(org) for org in known_orgs)
        if accept:
            logging.info(['accept', url])

            if not dry_run:
                g.accept_repo_invitation(invitation_id)
        else:
            logging.debug(['skipping', url])
Example #16
class Report(object):
    def __init__(self, app, date):
        settings = queries.get_app_interface_settings()
        self.secret_reader = SecretReader(settings=settings)
        # standard date format
        if hasattr(date, 'strftime'):
            date = date.strftime('%Y-%m-%d')

        self.app = app
        self.date = date
        self.report_sections = {}

        # valet
        # Pending https://issues.redhat.com/browse/APPSRE-1674
        # self.add_report_section('valet', self.slo_section())

        # promotions
        self.add_report_section(
            'production_promotions',
            self.get_activity_content(self.app.get('promotions')))

        # merges to master
        self.add_report_section(
            'merges_to_master',
            self.get_activity_content(self.app.get('merge_activity')))
        # Container Vulnerabilities
        self.add_report_section(
            'container_vulnerabilities',
            self.get_vulnerability_content(
                self.app.get('container_vulnerabilities')))

    @property
    def path(self):
        return 'data/reports/{}/{}.yml'.format(self.app['name'], self.date)

    def content(self):
        return {
            '$schema': '/app-sre/report-1.yml',
            'labels': {
                'app': self.app['name']
            },
            'name': f"{self.app['name']}-{self.date}",
            'app': {
                '$ref': self.app['path']
            },
            'date': self.date,
            'contentFormatVersion': CONTENT_FORMAT_VERSION,
            'content': yaml.safe_dump(self.report_sections, sort_keys=False)
        }

    def to_yaml(self):
        return yaml.safe_dump(self.content(), sort_keys=False)

    def to_message(self):
        return {'file_path': self.path, 'content': self.to_yaml()}

    def add_report_section(self, header, content):
        if not content:
            content = None

        self.report_sections[header] = content

    def slo_section(self):
        performance_parameters = [
            pp for pp in get_performance_parameters()
            if pp['app']['path'] == self.app['path']
        ]

        metrics_availability = self.get_performance_metrics(
            performance_parameters, self.calculate_performance_availability,
            'availability')

        metrics_latency = self.get_performance_metrics(
            performance_parameters, self.calculate_performance_latency,
            'latency')

        metrics = [*metrics_availability, *metrics_latency]

        if not metrics:
            return None

        return metrics

    @staticmethod
    def get_vulnerability_content(container_vulnerabilities):
        parsed_metrics = []
        if not container_vulnerabilities:
            return parsed_metrics

        for cluster, namespaces in container_vulnerabilities.items():
            for namespace, severities in namespaces.items():
                parsed_metrics.append({
                    'cluster': cluster,
                    'namespace': namespace,
                    'vulnerabilities': severities
                })
        return parsed_metrics

    def get_performance_metrics(self, performance_parameters, method, field):
        return [
            method(pp['component'], ns, metric)
            for pp in performance_parameters for ns in pp['namespaces']
            for metric in pp.get(field, []) if metric['kind'] == 'SLO'
            if ns['cluster']['prometheus']
        ]

    def calculate_performance_availability(self, component, ns, metric):
        metric_selectors = json.loads(metric['selectors'])
        metric_name = metric['metric']

        prom_info = ns['cluster']['prometheus']
        prom_auth_creds = self.secret_reader.read(prom_info['auth'])
        prom_auth = requests.auth.HTTPBasicAuth(*prom_auth_creds.split(':'))

        # volume
        vol_selectors = metric_selectors.copy()
        vol_selectors['namespace'] = ns['name']

        prom_vol_selectors = self.promqlify(vol_selectors)
        vol_promql_query = (f"sum(increase({metric_name}"
                            f"{{{prom_vol_selectors}}}[30d]))")

        vol_promql_query_result = promql(
            prom_info['url'],
            vol_promql_query,
            auth=prom_auth,
        )

        if len(vol_promql_query_result) != 1:
            logging.error(("unexpected promql result:\n"
                           f"url: {prom_info['url']}\n"
                           f"query: {vol_promql_query}"))
            return None

        volume = int(float(vol_promql_query_result[0]['value'][1]))

        # availability
        avail_selectors = metric_selectors.copy()
        avail_selectors['namespace'] = ns['name']
        prom_avail_selectors = self.promqlify(avail_selectors)

        avail_promql_query = f"""
        sum(increase(
            {metric_name}{{{prom_avail_selectors}, code!~"5.."}}[30d]
            ))
            /
        sum(increase(
            {metric_name}{{{prom_avail_selectors}}}[30d]
            )) * 100
        """

        avail_promql_query_result = promql(
            prom_info['url'],
            avail_promql_query,
            auth=prom_auth,
        )

        if len(avail_promql_query_result) != 1:
            logging.error(("unexpected promql result:\n"
                           f"url: {prom_info['url']}\n"
                           f"query: {avail_promql_query}"))

            return None

        availability = float(avail_promql_query_result[0]['value'][1])
        target_slo = 100 - float(metric['errorBudget'])

        availability_slo_met = availability >= target_slo

        return {
            'component': component,
            'type': 'availability',
            'selectors': self.promqlify(metric_selectors),
            'total_requests': volume,
            'availability': round(availability, 2),
            'availability_slo_met': availability_slo_met,
        }

    def calculate_performance_latency(self, component, ns, metric):
        metric_selectors = json.loads(metric['selectors'])
        metric_name = metric['metric']

        selectors = metric_selectors.copy()
        selectors['namespace'] = ns['name']

        prom_info = ns['cluster']['prometheus']
        prom_auth_creds = self.secret_reader.read(prom_info['auth'])
        prom_auth = requests.auth.HTTPBasicAuth(*prom_auth_creds.split(':'))

        percentile = float(metric['percentile']) / 100

        prom_selectors = self.promqlify(selectors)
        promql_query = f"""
            histogram_quantile({percentile},
                sum by (le) (increase(
                    {metric_name}{{
                        {prom_selectors}, code!~"5.."
                    }}[30d]))
            )
        """

        result = promql(
            prom_info['url'],
            promql_query,
            auth=prom_auth,
        )

        if len(result) != 1:
            logging.error(("unexpected promql result:\n"
                           f"url: {prom_info['url']}\n"
                           f"query: {promql_query}"))

            return None

        latency = float(result[0]['value'][1])
        latency_slo_met = latency <= float(metric['threshold'])

        return {
            'component': component,
            'type': 'latency',
            'selectors': self.promqlify(metric_selectors),
            'latency': round(latency, 2),
            'latency_slo_met': latency_slo_met,
        }

    @staticmethod
    def promqlify(selectors):
        return ", ".join([f'{k}="{v}"' for k, v in selectors.items()])

    @staticmethod
    def get_activity_content(activity):
        if not activity:
            return []

        return [{
            "repo": repo,
            "total": int(results[0]),
            "success": int(results[1]),
        } for repo, results in activity.items()]
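To illustrate the activity shape get_activity_content expects (inferred from the indexing above), a mapping of repo to a (total, success) pair of counters becomes a list of per-repo rows:

activity = {'https://github.com/org/repo': ('12', '11')}
rows = Report.get_activity_content(activity)
# rows == [{'repo': 'https://github.com/org/repo',
#           'total': 12, 'success': 11}]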
Example #17
class SaasHerder():
    """Wrapper around SaaS deployment actions."""
    def __init__(self,
                 saas_files,
                 thread_pool_size,
                 gitlab,
                 integration,
                 integration_version,
                 settings,
                 jenkins_map=None,
                 accounts=None,
                 validate=False):
        self.saas_files = saas_files
        if validate:
            self._validate_saas_files()
            if not self.valid:
                return
        self.thread_pool_size = thread_pool_size
        self.gitlab = gitlab
        self.integration = integration
        self.integration_version = integration_version
        self.settings = settings
        self.secret_reader = SecretReader(settings=settings)
        self.namespaces = self._collect_namespaces()
        self.jenkins_map = jenkins_map
        # each namespace is in fact a target,
        # so we can use the number of namespaces
        # to estimate the available thread pool size per task.
        divisor = len(self.namespaces) or 1
        self.available_thread_pool_size = \
            threaded.estimate_available_thread_pool_size(
                self.thread_pool_size,
                divisor)
        # if called by a single saas file, it may
        # specify that it manages resources exclusively.
        self.take_over = self._get_saas_file_attribute('takeover')
        self.compare = self._get_saas_file_attribute('compare')
        self.publish_job_logs = self._get_saas_file_attribute('publishJobLogs')
        if accounts:
            self._initiate_state(accounts)

    def _get_saas_file_attribute(self, attribute):
        return len(self.saas_files) == 1 and self.saas_files[0].get(attribute)

    def _validate_saas_files(self):
        self.valid = True
        saas_file_name_path_map = {}
        for saas_file in self.saas_files:
            saas_file_name = saas_file['name']
            saas_file_path = saas_file['path']
            saas_file_name_path_map.setdefault(saas_file_name, [])
            saas_file_name_path_map[saas_file_name].append(saas_file_path)

            saas_file_owners = [
                u['org_username'] for r in saas_file['roles']
                for u in r['users']
            ]
            if not saas_file_owners:
                msg = 'saas file {} has no owners: {}'
                logging.error(msg.format(saas_file_name, saas_file_path))
                self.valid = False

            for resource_template in saas_file['resourceTemplates']:
                resource_template_name = resource_template['name']
                for target in resource_template['targets']:
                    target_parameters = target['parameters']
                    if not target_parameters:
                        continue
                    target_parameters = json.loads(target_parameters)
                    target_namespace = target['namespace']
                    namespace_name = target_namespace['name']
                    cluster_name = target_namespace['cluster']['name']
                    environment = target_namespace['environment']
                    environment_name = environment['name']
                    environment_parameters = environment['parameters']
                    if not environment_parameters:
                        continue
                    environment_parameters = \
                        json.loads(environment_parameters)
                    msg = \
                        f'[{saas_file_name}/{resource_template_name}] ' + \
                        f'parameter found in target ' + \
                        f'{cluster_name}/{namespace_name} ' + \
                        f'should be reused from env {environment_name}'
                    for t_key, t_value in target_parameters.items():
                        if not isinstance(t_value, str):
                            continue
                        for e_key, e_value in environment_parameters.items():
                            if not isinstance(e_value, str):
                                continue
                            if '.' not in e_value:
                                continue
                            if e_value not in t_value:
                                continue
                            if t_key == e_key and t_value == e_value:
                                details = \
                                    f'consider removing {t_key}'
                            else:
                                replacement = t_value.replace(
                                    e_value, '${' + e_key + '}')
                                details = \
                                    f'target: \"{t_key}: {t_value}\". ' + \
                                    f'env: \"{e_key}: {e_value}\". ' + \
                                    f'consider \"{t_key}: {replacement}\"'
                            logging.warning(f'{msg}: {details}')

        duplicates = {
            saas_file_name: saas_file_paths
            for saas_file_name, saas_file_paths in
            saas_file_name_path_map.items() if len(saas_file_paths) > 1
        }
        if duplicates:
            self.valid = False
            msg = 'saas file name {} is not unique: {}'
            for saas_file_name, saas_file_paths in duplicates.items():
                logging.error(msg.format(saas_file_name, saas_file_paths))

    def _collect_namespaces(self):
        # namespaces may appear more than once in the result
        namespaces = []
        for saas_file in self.saas_files:
            managed_resource_types = saas_file['managedResourceTypes']
            resource_templates = saas_file['resourceTemplates']
            for rt in resource_templates:
                targets = rt['targets']
                for target in targets:
                    namespace = target['namespace']
                    if target.get('disable'):
                        logging.debug(
                            f"[{saas_file['name']}/{rt['name']}] target " +
                            f"{namespace['cluster']['name']}/" +
                            f"{namespace['name']} is disabled.")
                        continue
                    # managedResourceTypes is defined per saas_file
                    # add it to each namespace in the current saas_file
                    namespace['managedResourceTypes'] = managed_resource_types
                    namespaces.append(namespace)
        return namespaces

    def _initiate_state(self, accounts):
        self.state = State(integration=self.integration,
                           accounts=accounts,
                           settings=self.settings)

    @staticmethod
    def _collect_parameters(container):
        parameters = container.get('parameters') or {}
        if isinstance(parameters, str):
            parameters = json.loads(parameters)
        # adjust Python's True/False
        for k, v in parameters.items():
            if v is True:
                parameters[k] = 'true'
            elif v is False:
                parameters[k] = 'false'
            elif any([isinstance(v, t) for t in [dict, list, tuple]]):
                parameters[k] = json.dumps(v)
        return parameters
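    # For illustration: {'parameters': '{"replicas": 2, "debug": true}'}
    # yields {'replicas': 2, 'debug': 'true'} -- booleans become the strings
    # 'true'/'false', dicts/lists/tuples are re-serialized with json.dumps,
    # and other scalars pass through unchanged.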

    @retry()
    def _get_file_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'blob', ref, path)
        content = None
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            f = repo.get_contents(path, ref)
            content = f.decoded_content
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            f = project.files.get(file_path=path.lstrip('/'), ref=ref)
            content = f.decode()

        return yaml.safe_load(content), html_url

    @retry()
    def _get_directory_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'tree', ref, path)
        resources = []
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            for f in repo.get_contents(path, ref):
                file_path = os.path.join(path, f.name)
                file_contents = repo.get_contents(file_path, ref)
                resource = yaml.safe_load(file_contents.decoded_content)
                resources.append(resource)
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            for f in project.repository_tree(path=path.lstrip('/'),
                                             ref=ref,
                                             all=True):
                file_contents = \
                    project.files.get(file_path=f['path'], ref=ref)
                resource = yaml.safe_load(file_contents.decode())
                resources.append(resource)

        return resources, html_url

    @retry()
    def _get_commit_sha(self, options):
        url = options['url']
        ref = options['ref']
        github = options['github']
        hash_length = options.get('hash_length')
        commit_sha = ''
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            commit = repo.get_commit(sha=ref)
            commit_sha = commit.sha
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            commits = project.commits.list(ref_name=ref)
            commit_sha = commits[0].id

        if hash_length:
            return commit_sha[:hash_length]

        return commit_sha

    @staticmethod
    def _get_cluster_and_namespace(target):
        cluster = target['namespace']['cluster']['name']
        namespace = target['namespace']['name']
        return cluster, namespace

    def _process_template(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        url = options['url']
        path = options['path']
        provider = options['provider']
        target = options['target']
        github = options['github']
        target_ref = target['ref']

        resources = None
        html_url = None

        if provider == 'openshift-template':
            hash_length = options['hash_length']
            parameters = options['parameters']
            environment = target['namespace']['environment']
            environment_parameters = self._collect_parameters(environment)
            target_parameters = self._collect_parameters(target)

            consolidated_parameters = {}
            consolidated_parameters.update(environment_parameters)
            consolidated_parameters.update(parameters)
            consolidated_parameters.update(target_parameters)

            for replace_key, replace_value in consolidated_parameters.items():
                if not isinstance(replace_value, str):
                    continue
                replace_pattern = '${' + replace_key + '}'
                for k, v in consolidated_parameters.items():
                    if not isinstance(v, str):
                        continue
                    if replace_pattern in v:
                        consolidated_parameters[k] = \
                            v.replace(replace_pattern, replace_value)

            get_file_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }

            try:
                template, html_url = \
                    self._get_file_contents(get_file_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching template: {str(e)}")
                return None, None

            if "IMAGE_TAG" not in consolidated_parameters:
                template_parameters = template.get('parameters')
                if template_parameters is not None:
                    for template_parameter in template_parameters:
                        if template_parameter['name'] == 'IMAGE_TAG':
                            # add IMAGE_TAG only if it is required
                            get_commit_sha_options = {
                                'url': url,
                                'ref': target_ref,
                                'hash_length': hash_length,
                                'github': github
                            }
                            image_tag = self._get_commit_sha(
                                get_commit_sha_options)
                            consolidated_parameters['IMAGE_TAG'] = image_tag

            oc = OC('server', 'token', local=True)
            try:
                resources = oc.process(template, consolidated_parameters)
            except StatusCodeError as e:
                logging.error(
                    f"[{saas_file_name}/{resource_template_name}] " +
                    f"{html_url}: error processing template: {str(e)}")

        elif provider == 'directory':
            get_directory_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }
            try:
                resources, html_url = \
                    self._get_directory_contents(
                        get_directory_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching directory: {str(e)}")
                return None, None

        else:
            logging.error(f"[{saas_file_name}/{resource_template_name}] " +
                          f"unknown provider: {provider}")

        return resources, html_url

    def _collect_images(self, resource):
        images = set()
        # resources with pod templates
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # init containers
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["initContainers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CronJob
        try:
            template = resource["spec"]["jobTemplate"]["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CatalogSource templates
        try:
            images.add(resource["spec"]["image"])
        except KeyError:
            pass

        return images

    @staticmethod
    def _check_image(image, image_patterns, image_auth, error_prefix):
        error = False
        if image_patterns and \
                not any(image.startswith(p) for p in image_patterns):
            error = True
            logging.error(
                f"{error_prefix} Image is not in imagePatterns: {image}")
        try:
            valid = Image(image, **image_auth)
            if not valid:
                error = True
                logging.error(f"{error_prefix} Image does not exist: {image}")
        except Exception as e:
            error = True
            logging.error(f"{error_prefix} Image is invalid: {image}. " +
                          f"details: {str(e)}")

        return error

    def _check_images(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        html_url = options['html_url']
        resources = options['resources']
        image_auth = options['image_auth']
        image_patterns = options['image_patterns']
        error_prefix = \
            f"[{saas_file_name}/{resource_template_name}] {html_url}:"

        images_list = threaded.run(self._collect_images, resources,
                                   self.available_thread_pool_size)
        images = set([item for sublist in images_list for item in sublist])
        if not images:
            return False  # no errors
        errors = threaded.run(self._check_image,
                              images,
                              self.available_thread_pool_size,
                              image_patterns=image_patterns,
                              image_auth=image_auth,
                              error_prefix=error_prefix)
        error = True in errors
        return error

    def _initiate_github(self, saas_file):
        auth = saas_file.get('authentication') or {}
        auth_code = auth.get('code') or {}
        if auth_code:
            token = self.secret_reader.read(auth_code)
        else:
            # use the app-sre token by default
            default_org_name = 'app-sre'
            config = get_config(desired_org_name=default_org_name)
            token = config['github'][default_org_name]['token']

        base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
        return Github(token, base_url=base_url)

    def _initiate_image_auth(self, saas_file):
        """
        This function initiates a dict required for image authentication.
        This dict will be used as kwargs for sretoolbox's Image.
        The image authentication secret specified in the saas file must
        contain the 'user' and 'token' keys, and may optionally contain
        a 'url' key specifying the image registry url, used to decide
        whether an image should be checked using these credentials.
        The function returns the keys extracted from the secret in the
        structure expected by sretoolbox's Image:
        'user' --> 'username'
        'token' --> 'password'
        'url' --> 'auth_server' (optional)
        """
        auth = saas_file.get('authentication')
        if not auth:
            return {}
        auth_image_secret = auth.get('image')
        if not auth_image_secret:
            return {}

        creds = self.secret_reader.read_all(auth_image_secret)
        required_keys = ['user', 'token']
        ok = all(k in creds.keys() for k in required_keys)
        if not ok:
            logging.warning(
                "the specified image authentication secret " +
                f"found in path {auth_image_secret['path']} " +
                f"does not contain all required keys: {required_keys}")
            return {}

        image_auth = {'username': creds['user'], 'password': creds['token']}
        url = creds.get('url')
        if url:
            image_auth['auth_server'] = url

        return image_auth

    def populate_desired_state(self, ri):
        results = threaded.run(self.init_populate_desired_state_specs,
                               self.saas_files, self.thread_pool_size)
        desired_state_specs = \
            [item for sublist in results for item in sublist]
        threaded.run(self.populate_desired_state_saas_file,
                     desired_state_specs,
                     self.thread_pool_size,
                     ri=ri)

    def init_populate_desired_state_specs(self, saas_file):
        specs = []
        saas_file_name = saas_file['name']
        github = self._initiate_github(saas_file)
        image_auth = self._initiate_image_auth(saas_file)
        instance_name = saas_file['instance']['name']
        managed_resource_types = saas_file['managedResourceTypes']
        image_patterns = saas_file['imagePatterns']
        resource_templates = saas_file['resourceTemplates']
        saas_file_parameters = self._collect_parameters(saas_file)
        # iterate over resource templates (multiple per saas_file)
        for rt in resource_templates:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            provider = rt.get('provider') or 'openshift-template'
            hash_length = rt.get('hash_length') or self.settings['hashLength']
            parameters = self._collect_parameters(rt)

            consolidated_parameters = {}
            consolidated_parameters.update(saas_file_parameters)
            consolidated_parameters.update(parameters)

            # iterate over targets (each target is a namespace)
            for target in rt['targets']:
                if target.get('disable'):
                    # a warning is logged during SaasHerder initiation
                    continue
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                process_template_options = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'url': url,
                    'path': path,
                    'provider': provider,
                    'hash_length': hash_length,
                    'target': target,
                    'parameters': consolidated_parameters,
                    'github': github
                }
                check_images_options_base = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'image_auth': image_auth,
                    'image_patterns': image_patterns
                }
                spec = {
                    'saas_file_name': saas_file_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'managed_resource_types': managed_resource_types,
                    'process_template_options': process_template_options,
                    'check_images_options_base': check_images_options_base,
                    'instance_name': instance_name,
                    'upstream': target.get('upstream')
                }
                specs.append(spec)

        return specs

    def populate_desired_state_saas_file(self, spec, ri):
        saas_file_name = spec['saas_file_name']
        cluster = spec['cluster']
        namespace = spec['namespace']
        managed_resource_types = spec['managed_resource_types']
        process_template_options = spec['process_template_options']
        check_images_options_base = spec['check_images_options_base']
        instance_name = spec['instance_name']
        upstream = spec['upstream']

        resources, html_url = \
            self._process_template(process_template_options)
        if resources is None:
            ri.register_error()
            return
        # filter resources
        resources = [
            resource for resource in resources if isinstance(resource, dict)
            and resource['kind'] in managed_resource_types
        ]
        # check images
        skip_check_images = upstream and self.jenkins_map and \
            self.jenkins_map[instance_name].is_job_running(upstream)
        if skip_check_images:
            logging.warning(f"skipping check_image since " +
                            f"upstream job {upstream} is running")
        else:
            check_images_options = {
                'html_url': html_url,
                'resources': resources
            }
            check_images_options.update(check_images_options_base)
            image_error = self._check_images(check_images_options)
            if image_error:
                ri.register_error()
                return
        # add desired resources
        for resource in resources:
            resource_kind = resource['kind']
            resource_name = resource['metadata']['name']
            oc_resource = OR(resource,
                             self.integration,
                             self.integration_version,
                             caller_name=saas_file_name,
                             error_details=html_url)
            ri.add_desired(cluster, namespace, resource_kind, resource_name,
                           oc_resource)

    def get_moving_commits_diff(self, dry_run):
        results = threaded.run(self.get_moving_commits_diff_saas_file,
                               self.saas_files,
                               self.thread_pool_size,
                               dry_run=dry_run)
        return [item for sublist in results for item in sublist]

    def get_moving_commits_diff_saas_file(self, saas_file, dry_run):
        saas_file_name = saas_file['name']
        instance_name = saas_file['instance']['name']
        github = self._initiate_github(saas_file)
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            for target in rt['targets']:
                # don't trigger if there is a linked upstream job
                if target.get('upstream'):
                    continue
                ref = target['ref']
                get_commit_sha_options = {
                    'url': url,
                    'ref': ref,
                    'github': github
                }
                desired_commit_sha = \
                    self._get_commit_sha(get_commit_sha_options)
                # don't trigger on refs which are commit shas
                if ref == desired_commit_sha:
                    continue
                namespace = target['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}/{ref}"
                current_commit_sha = self.state.get(key, None)
                # skip if there is no change in commit sha
                if current_commit_sha == desired_commit_sha:
                    continue
                # don't trigger if this is the first time
                # this target is being deployed.
                # that will be taken care of by
                # openshift-saas-deploy-trigger-configs
                if current_commit_sha is None:
                    # store the value to take over from now on
                    if not dry_run:
                        self.state.add(key, value=desired_commit_sha)
                    continue
                # we finally found something we want to trigger on!
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'ref': ref,
                    'commit_sha': desired_commit_sha
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    def update_moving_commit(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        ref = job_spec['ref']
        commit_sha = job_spec['commit_sha']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}/{ref}"
        self.state.add(key, value=commit_sha, force=True)

    def get_configs_diff(self):
        results = threaded.run(self.get_configs_diff_saas_file,
                               self.saas_files, self.thread_pool_size)
        return [item for sublist in results for item in sublist]

    def get_configs_diff_saas_file(self, saas_file):
        saas_file_name = saas_file['name']
        saas_file_parameters = saas_file.get('parameters')
        saas_file_managed_resource_types = saas_file['managedResourceTypes']
        instance_name = saas_file['instance']['name']
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            rt_parameters = rt.get('parameters')
            for desired_target_config in rt['targets']:
                namespace = desired_target_config['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                desired_target_config['namespace'] = \
                    self.sanitize_namespace(namespace)
                # add parent parameters to target config
                desired_target_config['saas_file_parameters'] = \
                    saas_file_parameters
                # add managed resource types to target config
                desired_target_config['saas_file_managed_resource_types'] = \
                    saas_file_managed_resource_types
                desired_target_config['url'] = url
                desired_target_config['path'] = path
                desired_target_config['rt_parameters'] = rt_parameters
                # get current target config from state
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}"
                current_target_config = self.state.get(key, None)
                # skip if there is no change in target configuration
                if current_target_config == desired_target_config:
                    continue
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'target_config': desired_target_config
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    @staticmethod
    def sanitize_namespace(namespace):
        """Only keep fields that should trigger a new job."""
        new_job_fields = {
            'namespace': ['name', 'cluster', 'app'],
            'cluster': ['name', 'serverUrl'],
            'app': ['name']
        }
        namespace = {
            k: v
            for k, v in namespace.items() if k in new_job_fields['namespace']
        }
        cluster = namespace['cluster']
        namespace['cluster'] = {
            k: v
            for k, v in cluster.items() if k in new_job_fields['cluster']
        }
        app = namespace['app']
        namespace['app'] = {
            k: v
            for k, v in app.items() if k in new_job_fields['app']
        }
        return namespace
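    # For illustration: a namespace carrying extra fields such as
    # 'environment' or 'managedResourceTypes' is reduced to
    # {'name': ..., 'app': {'name': ...},
    #  'cluster': {'name': ..., 'serverUrl': ...}}
    # so unrelated attribute changes do not retrigger jobs.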

    def update_config(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        target_config = job_spec['target_config']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}"
        self.state.add(key, value=target_config, force=True)
Example #18
class JJB(object):
    """Wrapper around Jenkins Jobs"""
    def __init__(self, configs, ssl_verify=True, settings=None):
        self.settings = settings
        self.secret_reader = SecretReader(settings=settings)
        self.collect_configs(configs)
        self.modify_logger()
        self.python_https_verify = str(int(ssl_verify))

    def collect_configs(self, configs):
        gqlapi = gql.get_api()
        instances = \
            {c['instance']['name']: {
                'serverUrl': c['instance']['serverUrl'],
                'token': c['instance']['token'],
                'delete_method': c['instance']['deleteMethod']}
             for c in configs}

        working_dirs = {}
        instance_urls = {}
        for name, data in instances.items():
            token = data['token']
            server_url = data['serverUrl']
            wd = tempfile.mkdtemp()
            ini = self.secret_reader.read(token)
            ini = ini.replace('"', '')
            ini = ini.replace('false', 'False')
            ini_file_path = '{}/{}.ini'.format(wd, name)
            with open(ini_file_path, 'w') as f:
                f.write(ini)
                f.write('\n')
            working_dirs[name] = wd
            instance_urls[name] = server_url

        self.sort(configs)

        for c in configs:
            instance_name = c['instance']['name']
            config = c['config']
            config_file_path = \
                '{}/config.yaml'.format(working_dirs[instance_name])
            if config:
                with open(config_file_path, 'a') as f:
                    yaml.dump(yaml.load(config, Loader=yaml.FullLoader), f)
                    f.write('\n')
            else:
                config_path = c['config_path']
                # get config data
                try:
                    config_resource = gqlapi.get_resource(config_path)
                    config = config_resource['content']
                except gql.GqlGetResourceError as e:
                    raise FetchResourceError(str(e))
                with open(config_file_path, 'a') as f:
                    f.write(config)
                    f.write('\n')

        self.instances = instances
        self.instance_urls = instance_urls
        self.working_dirs = working_dirs

    def overwrite_configs(self, configs):
        """ This function will override the existing
        config files in the working directories with
        the supplied configs """
        for name, wd in self.working_dirs.items():
            config_path = '{}/config.yaml'.format(wd)
            with open(config_path, 'w') as f:
                f.write(configs[name])

    def sort(self, configs):
        configs.sort(key=self.sort_by_name)
        configs.sort(key=self.sort_by_type)

    def sort_by_type(self, config):
        if config['type'] == 'defaults':
            return 0
        elif config['type'] == 'global-defaults':
            return 5
        elif config['type'] == 'views':
            return 10
        elif config['type'] == 'secrets':
            return 20
        elif config['type'] == 'base-templates':
            return 30
        elif config['type'] == 'global-base-templates':
            return 35
        elif config['type'] == 'job-templates':
            return 40
        elif config['type'] == 'jobs':
            return 50

    def sort_by_name(self, config):
        return config['name']

    def get_configs(self):
        """ This function gets the configs from the
        working directories """
        configs = {}
        for name, wd in self.working_dirs.items():
            config_path = '{}/config.yaml'.format(wd)
            with open(config_path, 'r') as f:
                configs[name] = f.read()

        return configs

    def generate(self, io_dir, fetch_state):
        """
        Generates job definitions from JJB configs

        :param io_dir: Input/output directory
        :param fetch_state: subdirectory to use ('desired' or 'current')
        """
        for name, wd in self.working_dirs.items():
            ini_path = '{}/{}.ini'.format(wd, name)
            config_path = '{}/config.yaml'.format(wd)

            output_dir = path.join(io_dir, 'jjb', fetch_state, name)
            args = [
                '--conf', ini_path, 'test', config_path, '-o', output_dir,
                '--config-xml'
            ]
            self.execute(args)
            throughput.change_files_ownership(io_dir)

    def print_diffs(self, io_dir):
        """ Print the diffs between the current and
        the desired job definitions """
        current_path = path.join(io_dir, 'jjb', 'current')
        current_files = self.get_files(current_path)
        desired_path = path.join(io_dir, 'jjb', 'desired')
        desired_files = self.get_files(desired_path)

        create = self.compare_files(desired_files, current_files)
        delete = self.compare_files(current_files, desired_files)
        common = self.compare_files(desired_files, current_files, in_op=True)

        self.print_diff(create, desired_path, 'create')
        self.print_diff(delete, current_path, 'delete')
        self.print_diff(common, desired_path, 'update')

    def print_diff(self, files, replace_path, action):
        for f in files:
            if action == 'update':
                ft = self.toggle_cd(f)
                equal = filecmp.cmp(f, ft)
                if equal:
                    continue

            instance, item, _ = f.replace(replace_path + '/', '').split('/')
            item_type = et.parse(f).getroot().tag
            item_type = item_type.replace('hudson.model.ListView', 'view')
            item_type = item_type.replace('project', 'job')
            logging.info([action, item_type, instance, item])

            if action == 'update':
                with open(ft) as c, open(f) as d:
                    clines = c.readlines()
                    dlines = d.readlines()

                    differ = difflib.Differ()
                    diff = [
                        l for l in differ.compare(clines, dlines)
                        if l.startswith(('-', '+'))
                    ]
                    logging.debug("DIFF:\n" + "".join(diff))

    def compare_files(self, from_files, subtract_files, in_op=False):
        return [
            f for f in from_files
            if (self.toggle_cd(f) in subtract_files) is in_op
        ]

    def get_files(self, search_path):
        return [
            path.join(root, f) for root, _, files in os.walk(search_path)
            for f in files
        ]

    def toggle_cd(self, file_name):
        if 'desired' in file_name:
            return file_name.replace('desired', 'current')
        else:
            return file_name.replace('current', 'desired')
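    # e.g. toggle_cd('io/jjb/desired/ci/my-job/config.xml')
    # returns 'io/jjb/current/ci/my-job/config.xml'; str.replace swaps every
    # occurrence, so instance and job names should avoid the words
    # 'desired' and 'current'.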

    def update(self):
        for name, wd in self.working_dirs.items():
            ini_path = '{}/{}.ini'.format(wd, name)
            config_path = '{}/config.yaml'.format(wd)

            os.environ['PYTHONHTTPSVERIFY'] = self.python_https_verify
            cmd = ['jenkins-jobs', '--conf', ini_path, 'update', config_path]
            delete_method = self.instances[name]['delete_method']
            if delete_method != 'manual':
                cmd.append('--delete-old')
            subprocess.call(cmd)

    def get_jjb(self, args):
        from jenkins_jobs.cli.entry import JenkinsJobs
        return JenkinsJobs(args)

    def execute(self, args):
        jjb = self.get_jjb(args)
        with self.toggle_logger():
            jjb.execute()

    def modify_logger(self):
        yaml.warnings({'YAMLLoadWarning': False})
        formatter = logging.Formatter('%(levelname)s: %(message)s')
        logger = logging.getLogger()
        logger.handlers[0].setFormatter(formatter)
        self.default_logging = logger.level

    @contextmanager
    def toggle_logger(self):
        logger = logging.getLogger()
        try:
            yield logger.setLevel(logging.ERROR)
        finally:
            logger.setLevel(self.default_logging)

    def cleanup(self):
        for wd in self.working_dirs.values():
            shutil.rmtree(wd)

    def get_jobs(self, wd, name):
        ini_path = '{}/{}.ini'.format(wd, name)
        config_path = '{}/config.yaml'.format(wd)

        args = ['--conf', ini_path, 'test', config_path]
        jjb = self.get_jjb(args)
        builder = JenkinsManager(jjb.jjb_config)
        registry = ModuleRegistry(jjb.jjb_config, builder.plugins_list)
        parser = YamlParser(jjb.jjb_config)
        parser.load_files(jjb.options.path)
        jobs, _ = parser.expandYaml(registry, jjb.options.names)

        return jobs

    def get_job_webhooks_data(self):
        job_webhooks_data = {}
        for name, wd in self.working_dirs.items():
            jobs = self.get_jobs(wd, name)

            for job in jobs:
                try:
                    project_url_raw = job['properties'][0]['github']['url']
                    if 'https://github.com' in project_url_raw:
                        continue
                    job_url = \
                        '{}/project/{}'.format(self.instance_urls[name],
                                               job['name'])
                    project_url = \
                        project_url_raw.strip('/').replace('.git', '')
                    gitlab_triggers = job['triggers'][0]['gitlab']
                    mr_trigger = gitlab_triggers['trigger-merge-request']
                    trigger = 'mr' if mr_trigger else 'push'
                    hook = {
                        'job_url': job_url,
                        'trigger': trigger,
                    }
                    job_webhooks_data.setdefault(project_url, [])
                    job_webhooks_data[project_url].append(hook)
                except KeyError:
                    continue

        return job_webhooks_data

    def get_repos(self):
        repos = set()
        for name, wd in self.working_dirs.items():
            jobs = self.get_jobs(wd, name)
            for job in jobs:
                job_name = job['name']
                try:
                    repos.add(self.get_repo_url(job))
                except KeyError:
                    logging.debug('missing github url: {}'.format(job_name))
        return repos

    def get_admins(self):
        admins = set()
        for name, wd in self.working_dirs.items():
            jobs = self.get_jobs(wd, name)
            for j in jobs:
                try:
                    admins_list = \
                        j['triggers'][0]['github-pull-request']['admin-list']
                    admins.update(admins_list)
                except (KeyError, TypeError):
                    # no admins, that's fine
                    pass

        return admins

    @staticmethod
    def get_repo_url(job):
        repo_url_raw = job['properties'][0]['github']['url']
        return repo_url_raw.strip('/').replace('.git', '')

    def get_all_jobs(self,
                     job_types=[''],
                     instance_name=None,
                     include_test=False):
        all_jobs = {}
        for name, wd in self.working_dirs.items():
            if instance_name and name != instance_name:
                continue
            logging.debug(f'getting jobs from {name}')
            all_jobs[name] = []
            jobs = self.get_jobs(wd, name)
            for job in jobs:
                job_name = job['name']
                if not any(job_type in job_name for job_type in job_types):
                    continue
                if not include_test and 'test' in job_name:
                    continue
                # temporarily ignore openshift-saas-deploy jobs
                if job_name.startswith('openshift-saas-deploy'):
                    continue
                all_jobs[name].append(job)

        return all_jobs
Example #19
def fetch_current_state(unleash_instance):
    api_url = f"{unleash_instance['url']}/api"
    secret_reader = SecretReader(settings=queries.get_app_interface_settings())
    admin_access_token = \
        secret_reader.read(unleash_instance['token'])
    return get_feature_toggles(api_url, admin_access_token)