Example no. 1
0
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    """Notify a cluster's Slack usergroup once its scheduled OSD upgrade
    window has started.

    Scans every OCM-enabled cluster for an `osd-upgrade-config`
    UpgradeConfig resource and, the first time the current UTC time passes
    the configured `upgradeAt` timestamp, posts a heads-up message to the
    `<cluster>-cluster` Slack usergroup. Persistent state keyed by
    cluster + upgrade time guarantees at most one notification per window.
    """
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    ocm_clusters = [
        c for c in queries.get_clusters(minimal=True) if c.get('ocm')
    ]
    oc_map = OC_Map(clusters=ocm_clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    # Slack client is only needed (and only initialized) for real runs.
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters():
        oc = oc_map.get(cluster)
        if not oc:
            # oc is a falsy log-message object when the client could not
            # be built for this cluster — log and move on.
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                name='osd-upgrade-config',
                                allow_not_found=True)
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue

        spec = upgrade_config['spec']
        upgrade_at = spec['upgradeAt']
        version = spec['desired']['version']
        scheduled = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # Only act once 'now' has passed the scheduled upgrade time.
        if scheduled >= now:
            continue
        if state.exists(state_key):
            # already notified for this upgrade window
            continue
        logging.info(['cluster_upgrade', cluster])
        if dry_run:
            continue
        state.add(state_key)
        usergroup = f'{cluster}-cluster'
        usergroup_id = slack.get_usergroup_id(usergroup)
        slack.chat_post_message(
            f'Heads up <!subteam^{usergroup_id}>! '
            f'cluster `{cluster}` is currently '
            f'being upgraded to version `{version}`')
Example no. 2
0
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    """Prune old Tekton PipelineRun resources per provider retention policy.

    For each Tekton pipelines provider with a `retention` setting, keeps
    the newest `minimum` PipelineRuns and any run still within
    `days` of retention, deleting the rest (oldest first). Deletions are
    logged and only executed when dry_run is False.
    """
    settings = queries.get_app_interface_settings()
    pipelines_providers = queries.get_pipelines_providers()
    tkn_namespaces = [
        pp['namespace'] for pp in pipelines_providers
        if pp['provider'] == Providers.TEKTON
    ]

    oc_map = OC_Map(namespaces=tkn_namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(oc_map.cleanup)

    for pp in pipelines_providers:
        retention = pp.get('retention')
        if not retention:
            continue
        if pp['provider'] != Providers.TEKTON:
            continue

        ns_info = pp['namespace']
        namespace = ns_info['name']
        cluster = ns_info['cluster']['name']
        oc = oc_map.get(cluster)
        # Oldest runs first, so slicing off the head keeps the newest ones.
        runs = sorted(oc.get(namespace, 'PipelineRun')['items'],
                      key=lambda r: r['metadata']['creationTimestamp'])

        keep_newest = retention.get('minimum')
        if keep_newest:
            runs = runs[keep_newest:]

        retention_days = retention.get('days')
        for run_item in runs:
            name = run_item['metadata']['name']
            # A run still inside the retention window is spared.
            if retention_days and \
                    within_retention_days(run_item, retention_days):
                continue

            logging.info([
                'delete_trigger', cluster, namespace, 'PipelineRun', name
            ])
            if not dry_run:
                oc.delete(namespace, 'PipelineRun', name)