Example No. 1
def generate_object(jsonnet_string):
    try:
        fd, path = tempfile.mkstemp()
        defer(lambda: cleanup(path))
        os.write(fd, jsonnet_string.encode())
        os.close(fd)
    except Exception as e:
        raise JsonnetError(f'Error building jsonnet file: {e}')

    try:
        jsonnet_bundler_dir = os.environ['JSONNET_VENDOR_DIR']
    except KeyError:
        raise JsonnetError('JSONNET_VENDOR_DIR not set')

    cmd = ['jsonnet', '-J', jsonnet_bundler_dir, path]
    status = run(cmd, stdout=PIPE, stderr=PIPE)

    if status.returncode != 0:
        message = 'Error building json doc'
        if status.stderr:
            message += ": " + status.stderr.decode()

        raise JsonnetError(message)

    return json.loads(status.stdout)
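Every `run`-style function below takes a `defer=None` parameter and calls `defer(lambda: ...)` to register cleanup work. A minimal sketch of the decorator such functions are assumed to be wrapped with (illustrative, not necessarily the project's exact implementation): registered callbacks run in LIFO order when the wrapped function exits, even on error.

import functools

def defer(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        callbacks = []
        # inject a `defer` callable that registers cleanup callbacks
        kwargs['defer'] = callbacks.append
        try:
            return func(*args, **kwargs)
        finally:
            # run cleanups in reverse registration order, even on error
            for callback in reversed(callbacks):
                callback()
    return wrapper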
Example No. 2
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        take_over=True,
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('limitRanges')
    ]

    namespaces = construct_resources(namespaces)

    if not namespaces:
        logging.debug("No LimitRanges definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['LimitRange'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())

    add_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run,
                    oc_map,
                    ri,
                    enable_deletion=True,
                    take_over=take_over)
Example No. 3
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):

    oc_map, current_state, ocm_clusters = \
        fetch_current_state(thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())
    desired_state = fetch_desired_state(oc_map)

    # we only manage dedicated-admins via OCM
    current_state = [
        s for s in current_state if not (
            s['cluster'] in ocm_clusters and s['group'] == 'dedicated-admins')
    ]
    desired_state = [
        s for s in desired_state if not (
            s['cluster'] in ocm_clusters and s['group'] == 'dedicated-admins')
    ]

    diffs = calculate_diff(current_state, desired_state)
    validate_diffs(diffs)
    diffs.sort(key=sort_diffs)

    for diff in diffs:
        logging.info(list(diff.values()))

        if not dry_run:
            act(diff, oc_map)
Example No. 4
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):

    try:
        gqlapi = gql.get_api()
        namespaces = [
            namespace_info
            for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
            if namespace_info.get('networkPoliciesAllow')
        ]
        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['NetworkPolicy'],
            internal=internal,
            use_jump_host=use_jump_host)
        defer(lambda: oc_map.cleanup())
        fetch_desired_state(namespaces, ri, oc_map)
        ob.realize_data(dry_run, oc_map, ri)

    except Exception as e:
        msg = 'There was a problem running openshift network policies reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example No. 5
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):

    try:
        oc_map, current_state = \
            fetch_current_state(thread_pool_size, internal, use_jump_host)
        defer(lambda: oc_map.cleanup())
        desired_state = fetch_desired_state(oc_map)

        diffs = calculate_diff(current_state, desired_state)
        validate_diffs(diffs)
        diffs.sort(key=sort_diffs)

        for diff in diffs:
            logging.info(list(diff.values()))

            if not dry_run:
                act(diff, oc_map)

    except Exception as e:
        msg = 'There was a problem running openshift groups reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example No. 6
def gpg_key_valid(public_gpg_key, defer=None):
    stripped_public_gpg_key = public_gpg_key.rstrip()
    if ' ' in stripped_public_gpg_key:
        msg = 'key has spaces in it'
        return False, msg

    equal_sign_count = public_gpg_key.count('=')
    if not stripped_public_gpg_key.endswith('=' * equal_sign_count):
        msg = 'equal signs should only appear at the end of the key'
        return False, msg

    try:
        public_gpg_key_dec = base64.b64decode(public_gpg_key)
    except Exception:
        msg = 'could not perform base64 decode of key'
        return False, msg

    gnupg_home_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(gnupg_home_dir))
    proc = Popen(['gpg', '--homedir', gnupg_home_dir],
                 stdin=PIPE,
                 stdout=PIPE,
                 stderr=STDOUT)
    out, _ = proc.communicate(public_gpg_key_dec)
    if proc.returncode != 0:
        return False, out.decode('utf-8')

    keys = out.decode('utf-8').split('\n')
    key_types = [k.split(' ')[0] for k in keys if k]
    ok = all(elem in key_types for elem in ['pub', 'sub'])
    if not ok:
        msg = 'key must contain both pub and sub entries'
        return False, msg

    return True, ''
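A hedged usage sketch of the `(ok, msg)` contract above; it assumes `gpg_key_valid` is wrapped with a defer-style decorator, so callers never pass `defer` themselves (the key value is illustrative):

ok, msg = gpg_key_valid('mQENBGEX...')
if not ok:
    logging.error('invalid gpg key: {}'.format(msg))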
Example No. 7
def scan_history(repo_url, existing_keys, defer=None):
    logging.info('scanning {}'.format(repo_url))
    if requests.get(repo_url).status_code == 404:
        logging.info('not found {}'.format(repo_url))
        return []

    wd = tempfile.mkdtemp()
    defer(lambda: cleanup(wd))

    git.clone(repo_url, wd)
    DEVNULL = open(os.devnull, 'w')
    proc = Popen(['git', 'secrets', '--register-aws'],
                 cwd=wd, stdout=DEVNULL)
    proc.communicate()
    proc = Popen(['git', 'secrets', '--scan-history'],
                 cwd=wd, stdout=PIPE, stderr=PIPE)
    _, err = proc.communicate()
    if proc.returncode == 0:
        return []

    logging.info('found suspects in {}'.format(repo_url))
    suspected_files = get_suspected_files(err.decode('utf-8'))
    leaked_keys = get_leaked_keys(wd, suspected_files, existing_keys)
    if leaked_keys:
        logging.info('found suspected leaked keys: {}'.format(leaked_keys))

    return leaked_keys
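The `cleanup(wd)` helper is not shown in this snippet; a plausible sketch, assuming it only needs to remove the temporary clone (hypothetical implementation):

import shutil

def cleanup(path):
    # best-effort removal of the temporary working directory
    shutil.rmtree(path, ignore_errors=True)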
Example No. 8
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):

    try:
        oc_map, current_state, ocm_clusters = \
            fetch_current_state(thread_pool_size, internal, use_jump_host)
        defer(lambda: oc_map.cleanup())
        desired_state = fetch_desired_state(oc_map)

        # we only manage dedicated-admins via OCM
        current_state = [s for s in current_state
                         if not (s['cluster'] in ocm_clusters
                                 and s['group'] == 'dedicated-admins')]
        desired_state = [s for s in desired_state
                         if not (s['cluster'] in ocm_clusters
                                 and s['group'] == 'dedicated-admins')]

        diffs = calculate_diff(current_state, desired_state)
        validate_diffs(diffs)
        diffs.sort(key=sort_diffs)

        for diff in diffs:
            logging.info(list(diff.values()))

            if not dry_run:
                act(diff, oc_map)

    except Exception as e:
        msg = 'There was a problem running openshift groups reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example No. 9
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('managedRoles')
        and is_in_shard(f"{namespace_info['cluster']['name']}/" +
                        f"{namespace_info['name']}")
    ]
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['RoleBinding'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 10
def gpg_encrypt(content, recipient, public_gpg_key, defer=None):
    public_gpg_key_dec = base64.b64decode(public_gpg_key)

    gnupg_home_dir = tempfile.mkdtemp()
    defer(lambda: shutil.rmtree(gnupg_home_dir))
    # import public gpg key
    proc = Popen(['gpg', '--homedir', gnupg_home_dir, '--import'],
                 stdin=PIPE,
                 stdout=PIPE,
                 stderr=STDOUT)
    proc.communicate(public_gpg_key_dec)
    if proc.returncode != 0:
        return None
    # encrypt content
    proc = Popen(
        ['gpg', '--homedir', gnupg_home_dir, '--trust-model', 'always',
         '--encrypt', '--armor', '-r', recipient],
        stdin=PIPE,
        stdout=PIPE,
        stderr=STDOUT)
    out = proc.communicate(content.encode())
    if proc.returncode != 0:
        return None

    return out[0].decode('utf-8')
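A usage sketch for the function above; as before it assumes a defer-style decorator supplies `defer`, and the recipient and key values are illustrative:

encrypted = gpg_encrypt('secret content', 'user@example.com', public_key_b64)
if encrypted is None:
    logging.error('gpg encryption failed')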
Example No. 11
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        take_over=True,
        defer=None):

    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('quota')
    ]

    if not namespaces:
        logging.debug("No ResourceQuota definition found in app-interface!")
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['ResourceQuota'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 12
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        providers=None,
        cluster_name=None,
        namespace_name=None,
        init_api_resources=False,
        defer=None):
    providers = providers or []  # avoid a mutable default argument
    gqlapi = gql.get_api()
    namespaces = [
        namespace_info
        for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']
        if is_in_shard(f"{namespace_info['cluster']['name']}/" +
                       f"{namespace_info['name']}")
    ]
    namespaces = filter_namespaces_by_cluster_and_namespace(
        namespaces, cluster_name, namespace_name)
    namespaces = canonicalize_namespaces(namespaces, providers)
    oc_map, ri = \
        fetch_data(namespaces, thread_pool_size, internal, use_jump_host,
                   init_api_resources=init_api_resources)
    defer(lambda: oc_map.cleanup())

    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)

    return ri
Example No. 13
def run(dry_run=False,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        vault_output_path='',
        defer=None):
    namespaces = [
        namespace_info for namespace_info in queries.get_namespaces()
        if namespace_info.get('openshiftServiceAccountTokens')
    ]
    # iterate over a copy: the loop appends to `namespaces`, and
    # mutating a list while iterating over it would re-visit the
    # appended entries
    for namespace_info in namespaces[:]:
        for sat in namespace_info['openshiftServiceAccountTokens']:
            namespaces.append(sat['namespace'])

    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)
    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 14
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    performance_parameters = queries.get_performance_parameters()
    observability_namespaces = [
        pp['namespace']['cluster']['observabilityNamespace']
        for pp in performance_parameters
        if pp['namespace']['cluster']['observabilityNamespace'] is not None]

    if not observability_namespaces:
        logging.error('No observability namespaces found')
        sys.exit(1)

    ri, oc_map = ob.fetch_current_state(
        namespaces=observability_namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['PrometheusRule'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(performance_parameters, ri)
    ob.realize_data(dry_run, oc_map, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 15
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    pattern = tb.get_namespaces_pattern()
    threaded.run(test_cluster, oc_map.clusters(), thread_pool_size,
                 oc_map=oc_map,
                 pattern=pattern)
Example No. 16
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='',
        account_name=None, defer=None):

    ri, oc_map, tf = \
        setup(dry_run, print_only, thread_pool_size, internal,
              use_jump_host, account_name)

    if not dry_run:
        defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        if deletions_detected:
            if enable_deletion:
                tf.dump_deleted_users(io_dir)
            else:
                cleanup_and_exit(tf, deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    # Temporarily skip applying the secret when running terraform-resources
    # per account locally. The integration running on the cluster will
    # manage the secret after any manual run.
    # To be refactored together with the caller for further operator work.
    if account_name:
        cleanup_and_exit(tf)

    tf.populate_desired_state(ri, oc_map)

    ob.realize_data(dry_run, oc_map, ri)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True)

    if vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)

    cleanup_and_exit(tf)
Example No. 17
def run(thread_pool_size=10, defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    ns_under_test = tb.get_test_namespace_name()
    threaded.run(test_cluster,
                 oc_map.clusters(),
                 thread_pool_size,
                 oc_map=oc_map,
                 ns_under_test=ns_under_test)
Example No. 18
def run(dry_run=False, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='', defer=None):

    try:
        ri, oc_map, tf = \
            setup(print_only, thread_pool_size, internal, use_jump_host)

        defer(lambda: oc_map.cleanup())

        if print_only:
            cleanup_and_exit()
        if tf is None:
            err = True
            cleanup_and_exit(tf, err)

        if not light:
            deletions_detected, err = tf.plan(enable_deletion)
            if err:
                cleanup_and_exit(tf, err)
            if deletions_detected:
                if enable_deletion:
                    tf.dump_deleted_users(io_dir)
                else:
                    cleanup_and_exit(tf, deletions_detected)

        if dry_run:
            cleanup_and_exit(tf)

        if not light:
            err = tf.apply()
            if err:
                cleanup_and_exit(tf, err)

        tf.populate_desired_state(ri, oc_map)

        ob.realize_data(dry_run, oc_map, ri)

        disable_keys(dry_run, thread_pool_size,
                     disable_service_account_keys=True)

        if vault_output_path:
            write_outputs_to_vault(vault_output_path, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        msg = 'There was a problem running terraform resource reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)

    cleanup_and_exit(tf)
Example No. 19
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters():
        oc = oc_map.get(cluster)
        if not oc:
            # oc_map.get may return a falsy message object that still
            # carries log_level and message attributes
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(namespace='openshift-managed-upgrade-operator',
                                kind='UpgradeConfig',
                                name='osd-upgrade-config',
                                allow_not_found=True)
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue

        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' had passed
        # the upgrade at date time, we send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! ' +
                    f'cluster `{cluster}` is currently ' +
                    f'being upgraded to version `{version}`')
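The once-only notification above hinges on a persistent state key per (cluster, upgradeAt) pair. A minimal sketch of that idempotency pattern with an in-memory stand-in for the State store (names are illustrative):

notified = set()  # stands in for the persistent State store

def notify_once(cluster, upgrade_at, send):
    key = f'{cluster}-{upgrade_at}'
    if key in notified:
        return  # already notified for this upgrade window
    notified.add(key)
    send(cluster)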
Example No. 20
def run(dry_run=False, io_dir='throughput/', compare=True, defer=None):
    jjb = init_jjb()
    defer(lambda: jjb.cleanup())
    if compare:
        validate_repos_and_admins(jjb)

    if dry_run:
        jjb.test(io_dir, compare=compare)
    else:
        jjb.update()
Example No. 21
def run(dry_run=False,
        print_only=False,
        enable_deletion=False,
        thread_pool_size=10,
        defer=None):
    settings = queries.get_app_interface_settings()
    desired_state = fetch_desired_state(settings)

    # check there are no repeated vpc connection names
    connection_names = [c['connection_name'] for c in desired_state]
    if len(set(connection_names)) != len(connection_names):
        logging.error("duplicated vpc connection names found")
        sys.exit(1)

    participating_accounts = [item['account'] for item in desired_state]
    participating_account_names = [a['name'] for a in participating_accounts]
    accounts = [
        a for a in queries.get_aws_accounts()
        if a['name'] in participating_account_names
    ]

    ts = Terrascript(QONTRACT_INTEGRATION,
                     "",
                     thread_pool_size,
                     accounts,
                     settings=settings)
    ts.populate_additional_providers(participating_accounts)
    ts.populate_vpc_peerings(desired_state)
    working_dirs = ts.dump(print_only=print_only)

    if print_only:
        sys.exit()

    tf = Terraform(QONTRACT_INTEGRATION, QONTRACT_INTEGRATION_VERSION, "",
                   working_dirs, thread_pool_size)

    if tf is None:
        sys.exit(1)

    defer(lambda: tf.cleanup())

    deletions_detected, err = tf.plan(enable_deletion)
    if err:
        sys.exit(1)
    if deletions_detected and not enable_deletion:
        sys.exit(1)

    if dry_run:
        return

    err = tf.apply()
    if err:
        sys.exit(1)
Example No. 22
def run(dry_run,
        thread_pool_size=10,
        saas_file_name=None,
        env_name=None,
        defer=None):
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)

    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None

    saasherder = SaasHerder(saas_files,
                            thread_pool_size=thread_pool_size,
                            gitlab=gl,
                            integration=QONTRACT_INTEGRATION,
                            integration_version=QONTRACT_INTEGRATION_VERSION,
                            settings=settings,
                            jenkins_map=jenkins_map)
    if not saasherder.valid:
        sys.exit(1)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)

    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    ob.realize_data(dry_run,
                    oc_map,
                    ri,
                    caller=saas_file_name,
                    wait_for_namespace=True,
                    no_dry_run_skip_compare=True,
                    take_over=saasherder.take_over)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 23
def run(dry_run=False, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    oc_map, current_state = \
        fetch_current_state(thread_pool_size, internal, use_jump_host)
    defer(lambda: oc_map.cleanup())
    desired_state = fetch_desired_state(oc_map)

    diffs = calculate_diff(current_state, desired_state)

    for diff in diffs:
        logging.info(list(diff.values()))

        if not dry_run:
            act(diff, oc_map)
Example No. 24
def run(dry_run=False,
        thread_pool_size=10,
        disable_service_account_keys=False,
        defer=None):
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    defer(lambda: cleanup(working_dirs))
    error = aws.delete_keys(dry_run, keys_to_delete, working_dirs,
                            disable_service_account_keys)
    if error:
        sys.exit(1)
Example No. 25
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    oc_map, desired_state = get_desired_state(internal, use_jump_host,
                                              thread_pool_size)
    defer(lambda: oc_map.cleanup())
    results = threaded.run(check_ns_exists, desired_state, thread_pool_size,
                           oc_map=oc_map)
    specs_to_create = [spec for spec, create in results if create]

    for spec in specs_to_create:
        logging.info(['create', spec['cluster'], spec['namespace']])

        if not dry_run:
            create_new_project(spec, oc_map)
Example No. 26
def run(dry_run, print_only=False,
        enable_deletion=False, io_dir='throughput/',
        thread_pool_size=10, internal=None, use_jump_host=True,
        light=False, vault_output_path='', defer=None):

    ri, oc_map, tf = \
        setup(print_only, thread_pool_size, internal, use_jump_host)

    defer(lambda: oc_map.cleanup())

    if print_only:
        cleanup_and_exit()
    if tf is None:
        err = True
        cleanup_and_exit(tf, err)

    if not light:
        deletions_detected, err = tf.plan(enable_deletion)
        if err:
            cleanup_and_exit(tf, err)
        if deletions_detected:
            if enable_deletion:
                tf.dump_deleted_users(io_dir)
            else:
                cleanup_and_exit(tf, deletions_detected)

    if dry_run:
        cleanup_and_exit(tf)

    if not light:
        err = tf.apply()
        if err:
            cleanup_and_exit(tf, err)

    tf.populate_desired_state(ri, oc_map)

    ob.realize_data(dry_run, oc_map, ri)

    disable_keys(dry_run, thread_pool_size,
                 disable_service_account_keys=True)

    if vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)

    cleanup_and_exit(tf)
Example No. 27
def run(dry_run=False, io_dir='throughput/', compare=True, defer=None):
    jjb = init_jjb()
    defer(lambda: jjb.cleanup())

    try:
        if compare:
            validate_repos_and_admins(jjb)
        if dry_run:
            jjb.test(io_dir, compare=compare)
        else:
            jjb.update()
    except Exception as e:
        msg = 'Error running integration. '
        msg += 'Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)
Example No. 28
def run(defer=None):
    oc_map = tb.get_oc_map(QONTRACT_E2E_TEST)
    defer(lambda: oc_map.cleanup())
    pattern = tb.get_namespaces_pattern()
    for cluster in oc_map.clusters():
        oc = oc_map.get(cluster)
        logging.info("[{}] validating RoleBindings".format(cluster))

        projects = [p['metadata']['name']
                    for p in oc.get_all('Project')['items']
                    if p['status']['phase'] != 'Terminating' and
                    not re.search(pattern, p['metadata']['name']) and
                    'api.openshift.com/id'
                    not in p['metadata'].get('labels', {})]

        for project in projects:
            logging.info("[{}/{}] validating RoleBindings".format(
                cluster, project))
            dat.test_project_admin_rolebindings(oc, project)
Example No. 29
def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, vault_output_path='', defer=None):
    namespaces = canonicalize_namespaces(queries.get_serviceaccount_tokens())
    ri, oc_map = ob.fetch_current_state(
        namespaces=namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        override_managed_types=['Secret'],
        internal=internal,
        use_jump_host=use_jump_host)
    defer(lambda: oc_map.cleanup())
    fetch_desired_state(namespaces, ri, oc_map)
    ob.realize_data(dry_run, oc_map, ri)
    if not dry_run and vault_output_path:
        write_outputs_to_vault(vault_output_path, ri)

    if ri.has_error_registered():
        sys.exit(1)
Example No. 30
def run(dry_run,
        thread_pool_size=10,
        internal=None,
        use_jump_host=True,
        defer=None):

    try:
        gqlapi = gql.get_api()

        namespaces = []
        for namespace_info in gqlapi.query(NAMESPACES_QUERY)['namespaces']:
            if not namespace_info.get('networkPoliciesAllow'):
                continue

            shard_key = (f"{namespace_info['cluster']['name']}/"
                         f"{namespace_info['name']}")

            if not is_in_shard(shard_key):
                continue

            namespaces.append(namespace_info)

        ri, oc_map = ob.fetch_current_state(
            namespaces=namespaces,
            thread_pool_size=thread_pool_size,
            integration=QONTRACT_INTEGRATION,
            integration_version=QONTRACT_INTEGRATION_VERSION,
            override_managed_types=['NetworkPolicy'],
            internal=internal,
            use_jump_host=use_jump_host)
        defer(lambda: oc_map.cleanup())
        fetch_desired_state(namespaces, ri, oc_map)
        ob.realize_data(dry_run, oc_map, ri)

        if ri.has_error_registered():
            sys.exit(1)

    except Exception as e:
        msg = 'There was a problem running openshift network policies reconcile.'
        msg += ' Exception: {}'
        msg = msg.format(str(e))
        logging.error(msg)
        sys.exit(1)