def delete_old_services(config, application, version, release, execute):
    '''Delete all service objects of an application except the target release.

    Aborts if the service for the requested release does not exist (to avoid
    deleting everything on a typo). Each deletion goes through the deploy API
    as a change request; with ``execute`` the change requests are approved and
    executed immediately, otherwise their IDs are printed.
    '''
    namespace = config.get('kubernetes_namespace')
    kubectl_login(config)
    data = kubectl_get(namespace, 'services', '-l', 'application={}'.format(application))
    services = data['items']
    target_service_name = '{}-{}'.format(application, release)
    services_to_delete = []
    service_found = False
    for service in sorted(services, key=lambda d: d['metadata']['name'], reverse=True):
        service_name = service['metadata']['name']
        if service_name == target_service_name:
            service_found = True
        else:
            services_to_delete.append(service_name)
    if not service_found:
        error('Service {} was not found.'.format(target_service_name))
        raise click.Abort()
    # cluster and namespace are the same for every service: look them up once
    # instead of once per loop iteration (was re-read inside the loop)
    cluster_id = config.get('kubernetes_cluster')
    namespace = config.get('kubernetes_namespace')
    for service_name in services_to_delete:
        info('Deleting service {}..'.format(service_name))
        path = '/kubernetes-clusters/{}/namespaces/{}/services/{}'.format(
            cluster_id, namespace, service_name)
        response = request(config, requests.delete, path)
        change_request_id = response.json()['id']
        if execute:
            approve_and_execute(config, change_request_id)
        else:
            print(change_request_id)
def configure(config):
    '''Configure GitHub access'''
    emails = config.get('emails', [])
    if not emails:
        try:
            emails = [get_git_email()]
        except Exception:
            # best effort only: if git is not configured we simply prompt
            # without a default. A bare "except:" (as before) would also
            # swallow KeyboardInterrupt/SystemExit.
            pass
    emails = click.prompt('Your email addresses (comma separated)',
                          default=','.join(emails) or None)
    token = click.prompt('Your personal GitHub access token', hide_input=True,
                         default=config.get('github_access_token'))
    # redundant list() wrapper around the comprehension removed
    emails = [mail.strip() for mail in emails.split(',')]
    config = {'emails': emails, 'github_access_token': token}
    repositories = {}
    with Action('Scanning repositories..') as act:
        for repo in get_repos(token):
            repositories[repo['url']] = repo
            act.progress()
    path = os.path.join(CONFIG_DIR, 'repositories.yaml')
    os.makedirs(CONFIG_DIR, exist_ok=True)
    with open(path, 'w') as fd:
        yaml.safe_dump(repositories, fd)
    with Action('Storing configuration..'):
        stups_cli.config.store_config(config, 'github-maintainer-cli')
def scale_deployment(config, application, version, release, replicas, execute):
    '''Scale a single deployment'''
    namespace = config.get('kubernetes_namespace')
    kubectl_login(config)
    deployment_name = '{}-{}-{}'.format(application, version, release)
    info('Scaling deployment {} to {} replicas..'.format(
        deployment_name, replicas))
    resources_update = ResourcesUpdate()
    resources_update.set_number_of_replicas(deployment_name, replicas)
    cluster_id = config.get('kubernetes_cluster')
    # namespace was already looked up above; the duplicate lookup was removed
    path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(
        cluster_id, namespace)
    response = request(config, requests.patch, path, json=resources_update.to_dict())
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def scm_source(config, team, artifact, tag, url, output):
    '''Show SCM source information such as GIT revision'''
    set_pierone_url(config, url)
    token = get_token()
    tags = get_tags(config.get('url'), team, artifact, token)
    # Fail early with a clear message instead of printing an empty table
    # (consistent with the newer variant of this command)
    if not tags:
        raise click.UsageError('Artifact or Team does not exist! '
                               'Please double check for spelling mistakes.')
    if not tag:
        tag = [t['name'] for t in tags]
    rows = []
    for t in tag:
        row = request(config.get('url'),
                      '/teams/{}/artifacts/{}/tags/{}/scm-source'.format(team, artifact, t),
                      token).json()
        if not row:
            row = {}
        row['tag'] = t
        matching_tag = [d for d in tags if d['name'] == t]
        row['created_by'] = ''.join([d['created_by'] for d in matching_tag])
        if matching_tag:
            row['created_time'] = parse_time(''.join([d['created'] for d in matching_tag]))
        rows.append(row)
    rows.sort(key=lambda row: (row['tag'], row.get('created_time')))
    with OutputFormat(output):
        print_table(['tag', 'author', 'url', 'revision', 'status', 'created_time', 'created_by'],
                    rows,
                    titles={'tag': 'Tag', 'created_by': 'By', 'created_time': 'Created',
                            'url': 'URL', 'revision': 'Revision', 'status': 'Status'},
                    max_column_widths={'revision': 10})
def scm_source(config, team, artifact, tag, url, output):
    '''Show SCM source information such as GIT revision'''
    set_pierone_url(config, url)
    token = get_token()
    tags = get_tags(config.get('url'), team, artifact, token)
    if not tags:
        raise click.UsageError('Artifact or Team does not exist! '
                               'Please double check for spelling mistakes.')
    # no explicit tag(s) given: report on every known tag
    if not tag:
        tag = [entry['name'] for entry in tags]
    table_rows = []
    for tag_name in tag:
        resp = request(config.get('url'),
                       '/teams/{}/artifacts/{}/tags/{}/scm-source'.format(team, artifact, tag_name),
                       token, True)
        entry = {} if resp is None else resp.json()
        entry['tag'] = tag_name
        matches = [d for d in tags if d['name'] == tag_name]
        entry['created_by'] = ''.join([d['created_by'] for d in matches])
        if matches:
            entry['created_time'] = parse_time(''.join([d['created'] for d in matches]))
        table_rows.append(entry)
    table_rows.sort(key=lambda entry: (entry['tag'], entry.get('created_time')))
    with OutputFormat(output):
        print_table(['tag', 'author', 'url', 'revision', 'status', 'created_time', 'created_by'],
                    table_rows,
                    titles={'tag': 'Tag', 'created_by': 'By', 'created_time': 'Created',
                            'url': 'URL', 'revision': 'Revision', 'status': 'Status'},
                    max_column_widths={'revision': 10})
def delete(config, type, resource, execute):
    '''Delete a Kubernetes resource or Cloud Formation stack'''
    if type == 'kubernetes':
        parts = resource.split('/')
        if len(parts) != 2:
            error('Kubernetes resource must be KIND/NAME')
            raise click.Abort()
        kind, name = parts
        info('Deleting Kubernetes {} {}..'.format(kind, name))
        path = '/kubernetes-clusters/{}/namespaces/{}/{}/{}'.format(
            config.get('kubernetes_cluster'), config.get('kubernetes_namespace'),
            kind, name)
    else:
        info('Deleting Cloud Formation stack {}..'.format(resource))
        path = '/aws-accounts/{}/regions/{}/cloudformation-stacks/{}'.format(
            config.get('aws_account'), config.get('aws_region'), resource)
    # deletion is submitted as a change request to the deploy API
    response = request(config, requests.delete, path)
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def kubectl_login(config):
    '''Log in to the cluster via zkubectl.

    Prefers the configured API server URL; falls back to the cluster ID,
    which requires zkubectl to be configured appropriately with the
    Cluster Registry URL.
    '''
    target = config.get('kubernetes_api_server') or config.get('kubernetes_cluster')
    subprocess.check_call(['zkubectl', 'login', target])
def configure(config):
    """Configure GitHub access"""
    emails = config.get("emails", [])
    if not emails:
        try:
            emails = [get_git_email()]
        except Exception:
            # best effort only: if git is not configured we simply prompt
            # without a default. A bare "except:" (as before) would also
            # swallow KeyboardInterrupt/SystemExit.
            pass
    emails = click.prompt("Your email addresses (comma separated)", default=",".join(emails) or None)
    token = click.prompt(
        "Your personal GitHub access token", hide_input=True, default=config.get("github_access_token")
    )
    # redundant list() wrapper around the comprehension removed
    emails = [mail.strip() for mail in emails.split(",")]
    config = {"emails": emails, "github_access_token": token}
    repositories = {}
    with Action("Scanning repositories..") as act:
        for repo in get_repos(token):
            repositories[repo["url"]] = repo
            act.progress()
    path = os.path.join(CONFIG_DIR, "repositories.yaml")
    os.makedirs(CONFIG_DIR, exist_ok=True)
    with open(path, "w") as fd:
        yaml.safe_dump(repositories, fd)
    with Action("Storing configuration.."):
        stups_cli.config.store_config(config, "github-maintainer-cli")
def login(args: list):
    '''Resolve the requested cluster (ID, alias or URL) to an API server URL,
    store it in the local config and return it.'''
    config = stups_cli.config.load_config(APP_NAME)
    if args:
        cluster_or_url = args[0]
    else:
        cluster_or_url = click.prompt(
            'Cluster ID or URL of Kubernetes API server')

    def _cluster_registry():
        # lazily resolve the Cluster Registry URL, prompting if unconfigured
        registry = config.get('cluster_registry')
        if not registry:
            registry = fix_url(click.prompt('URL of Cluster Registry'))
        return registry

    if len(cluster_or_url.split(':')) >= 3:
        # looks like a Cluster ID (aws:123456789012:eu-central-1:kube-1)
        url = get_api_server_url_for_cluster_id(_cluster_registry(), cluster_or_url)
    elif looks_like_url(cluster_or_url):
        url = cluster_or_url
    else:
        # treat anything else as a cluster alias
        url = get_api_server_url_for_alias(_cluster_registry(), cluster_or_url)
    url = fix_url(url)
    config['api_server'] = url
    stups_cli.config.store_config(config, APP_NAME)
    return url
def traffic(config, application, release, percent, execute):
    '''Show or change ingress traffic weights for an application.'''
    cluster_id = config.get('kubernetes_cluster')
    namespace = config.get('kubernetes_namespace')
    ingress = kubectl_get(namespace, 'ingresses', application)
    # read-only mode: neither release nor percent given
    if release is None and percent is None:
        print(json.dumps(get_ingress_backends(ingress)))
        return
    backend = '{}-{}'.format(application, release)
    backend_weights = calculate_backend_weights(ingress, backend, percent)
    if not backend_weights:
        error('Failed to find ingress backends {}'.format(backend))
        raise click.Abort()
    # update ingress resource
    resources_update = ResourcesUpdate()
    # ~1 == / in json patch
    annotation_key = INGRESS_BACKEND_WEIGHT_ANNOTATION_KEY.replace('/', '~1')
    resources_update.set_annotation(application, annotation_key,
                                    json.dumps(backend_weights), 'ingresses')
    path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(cluster_id, namespace)
    response = request(config, requests.patch, path, json=resources_update.to_dict())
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def list_violations(config, output, since, region, meta, remeta, limit, all, **kwargs):
    '''List violations'''
    url = config.get('url')
    if not url:
        raise click.ClickException(
            'Missing configuration URL. Please run "stups configure".')
    kwargs['accounts'] = kwargs.get('accounts') or config.get('accounts')
    token = get_token()
    params = {'size': limit,
              'sort': 'id,DESC',
              'from': parse_since(since),
              'application-ids': kwargs.get('applications'),
              'application-version-ids': kwargs.get('application_versions')}
    params.update(kwargs)
    r = request(url, '/api/violations', token, params=params)
    r.raise_for_status()
    data = r.json()['content']
    if all:
        # additionally fetch violations already marked as checked
        params['checked'] = 'true'
        r = request(url, '/api/violations', token, params=params)
        r.raise_for_status()
        data.extend(r.json()['content'])
    rows = []
    for row in data:
        if region and row['region'] != region:
            continue
        if meta and not meta_matches(row['meta_info'], meta):
            continue
        if remeta and not meta_matches_re(format_meta_info(row['meta_info']), remeta):
            continue
        row['violation_type'] = row['violation_type']['id']
        row['created_time'] = parse_time(row['created'])
        row['meta_info'] = format_meta_info(row['meta_info'])
        rows.append(row)
    # we get the newest violations first, but we want to print them in order
    rows.reverse()
    with OutputFormat(output):
        print_table(
            ['account_id', 'region', 'id', 'violation_type', 'instance_id',
             'application_id', 'application_version_id', 'meta_info',
             'comment', 'created_time'],
            rows,
            titles={'created_time': 'Created',
                    'application_id': 'Application',
                    'application_version_id': 'Application Version'})
def configure(config):
    '''Configure fullstop. CLI'''
    # prompt order matters: URL first, then account IDs (dict literal
    # evaluates values in order)
    new_config = {
        'url': click.prompt('Fullstop URL', default=config.get('url'), type=UrlType()),
        'accounts': click.prompt('AWS account IDs (comma separated)',
                                 default=config.get('accounts')),
    }
    with Action('Storing configuration..'):
        stups_cli.config.store_config(new_config, 'fullstop')
def tags(config, team: str, artifact, url, output, limit):
    '''List all tags for a given team'''
    set_pierone_url(config, url)
    token = get_token()
    if limit is None:
        # show 20 rows if artifact was given, else show only 3
        limit = 20 if artifact else 3
    if not artifact:
        artifact = get_artifacts(config.get('url'), team, token)
        if not artifact:
            raise click.UsageError(
                'The Team you are looking for does not exist or '
                'we could not find any artifacts registered in Pierone! '
                'Please double check for spelling mistakes.')
    # the Docker registry host is the Pier One URL without the scheme
    registry = config.get('url')
    if registry.startswith('https://'):
        registry = registry[8:]
    slice_from = -limit
    rows = []
    for art in artifact:
        image = DockerImage(registry=registry, team=team, artifact=art, tag=None)
        try:
            image_tags = get_image_tags(image, token)
        except Unauthorized as e:
            raise click.ClickException(str(e))
        if not image_tags:
            raise click.UsageError(
                'Artifact or Team does not exist! '
                'Please double check for spelling mistakes.')
        rows.extend(image_tags[slice_from:])
    # sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service)
    rows.sort(key=lambda row: (row['team'], row['artifact']))
    with OutputFormat(output):
        titles = {
            'created_time': 'Created',
            'created_by': 'By',
            'severity_fix_available': 'Fixable CVE Severity',
            'severity_no_fix_available': 'Unfixable CVE Severity'
        }
        print_table(['team', 'artifact', 'tag', 'created_time', 'created_by',
                     'severity_fix_available', 'severity_no_fix_available'],
                    rows, titles=titles, styles=CVE_STYLES)
def list_violations(config, output, since, region, meta, remeta, limit, all, **kwargs):
    '''List violations'''
    url = config.get('url')
    if not url:
        raise click.ClickException('Missing configuration URL. Please run "stups configure".')
    kwargs['accounts'] = kwargs.get('accounts') or config.get('accounts')
    token = get_token()
    params = {'size': limit,
              'sort': 'id,DESC',
              'from': parse_since(since),
              'application-ids': kwargs.get('applications'),
              'application-version-ids': kwargs.get('application_versions')}
    params.update(kwargs)
    r = request(url, '/api/violations', token, params=params)
    r.raise_for_status()
    data = r.json()['content']
    if all:
        # additionally fetch violations already marked as checked
        params['checked'] = 'true'
        r = request(url, '/api/violations', token, params=params)
        r.raise_for_status()
        data.extend(r.json()['content'])
    rows = []
    for row in data:
        if region and row['region'] != region:
            continue
        if meta and not meta_matches(row['meta_info'], meta):
            continue
        if remeta and not meta_matches_re(format_meta_info(row['meta_info']), remeta):
            continue
        row['violation_type'] = row['violation_type']['id']
        row['created_time'] = parse_time(row['created'])
        row['meta_info'] = format_meta_info(row['meta_info'])
        rows.append(row)
    # we get the newest violations first, but we want to print them in order
    rows.reverse()
    with OutputFormat(output):
        print_table(['account_id', 'region', 'id', 'violation_type', 'instance_id',
                     'application_id', 'application_version_id', 'meta_info',
                     'comment', 'created_time'],
                    rows,
                    titles={'created_time': 'Created',
                            'application_id': 'Application',
                            'application_version_id': 'Application Version'})
def apply(config, template_or_directory, parameter, execute):
    '''Apply CloudFormation or Kubernetes resource'''
    # collect all top-level *.yaml files if a directory was given
    if os.path.isdir(template_or_directory):
        template_paths = [os.path.join(template_or_directory, entry)
                          for entry in os.listdir(template_or_directory)
                          if entry.endswith('.yaml') and not entry.startswith('.')]
    else:
        template_paths = [template_or_directory]
    context = parse_parameters(parameter)
    namespace = config.get('kubernetes_namespace')
    # try to find previous release of a service.
    data = kubectl_get(namespace, 'services', '-l',
                       'application={}'.format(context['application']))
    context["prev_release"] = get_prev_release(data['items'], context['release'])
    for template_path in template_paths:
        with open(template_path, 'r') as fd:
            data = _render_template(fd, context)
        if not isinstance(data, dict):
            error('Invalid YAML contents in {}'.format(template_path))
            raise click.Abort()
        if 'kind' in data:
            info('Applying Kubernetes manifest {}..'.format(template_path))
            cluster_id = config.get('kubernetes_cluster')
            namespace = config.get('kubernetes_namespace')
            api_path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(cluster_id, namespace)
            response = request(config, requests.post, api_path, json=data)
            change_request_id = response.json()['id']
        elif 'Resources' in data:
            info('Applying Cloud Formation template {}..'.format(template_path))
            aws_account = config.get('aws_account')
            aws_region = config.get('aws_region')
            stack_name = data.get('Metadata', {}).get('StackName')
            if not stack_name:
                error('Cloud Formation template requires Metadata/StackName property')
                raise click.Abort()
            api_path = '/aws-accounts/{}/regions/{}/cloudformation-stacks/{}'.format(
                aws_account, aws_region, stack_name)
            response = request(config, requests.put, api_path, json=data)
            change_request_id = response.json()['id']
        else:
            error('Neither a Kubernetes manifest nor a Cloud Formation template: {}'.format(template_path))
            raise click.Abort()
        if execute:
            approve_and_execute(config, change_request_id)
        else:
            print(change_request_id)
def switch_deployment(config, application, version, release, ratio, execute):
    '''Switch to new release'''
    namespace = config.get('kubernetes_namespace')
    kubectl_login(config)
    # ratio has the form "TARGET/TOTAL", e.g. "1/4"
    target_replicas, total = (int(part) for part in ratio.split('/'))
    data = kubectl_get(namespace, 'deployments', '-l',
                       'application={}'.format(application))
    deployments = data['items']
    target_deployment_name = '{}-{}-{}'.format(application, version, release)
    if not any(d['metadata']['name'] == target_deployment_name for d in deployments):
        error("Deployment {} does not exist!".format(target_deployment_name))
        exit(1)
    resources_update = ResourcesUpdate()
    remaining_replicas = total - target_replicas
    for deployment in sorted(deployments, key=lambda d: d['metadata']['name'], reverse=True):
        deployment_name = deployment['metadata']['name']
        if deployment_name == target_deployment_name:
            replicas = target_replicas
        else:
            # maybe spread across all other deployments?
            replicas = remaining_replicas
            remaining_replicas = 0
        info('Scaling deployment {} to {} replicas..'.format(
            deployment_name, replicas))
        resources_update.set_number_of_replicas(deployment_name, replicas)
    cluster_id = config.get('kubernetes_cluster')
    namespace = config.get('kubernetes_namespace')
    path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(
        cluster_id, namespace)
    response = request(config, requests.patch, path, json=resources_update.to_dict())
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def cli(ctx):
    """Load stored configuration; all subcommands except "configure" require it."""
    config = stups_cli.config.load_config("github-maintainer-cli")
    emails = config.get("emails")
    token = config.get("github_access_token")
    subcommand = ctx.invoked_subcommand or "x"
    # "configure" itself must be runnable without prior configuration
    if not "configure".startswith(subcommand):
        if not emails:
            raise click.UsageError('No emails configured. Please run "configure".')
        if not token:
            raise click.UsageError('No GitHub access token configured. Please run "configure".')
    ctx.obj = config
def request(config: dict, method, path: str, headers=None, exit_on_error=True, **kwargs):
    '''Call the deploy API with an OAuth bearer token and return the response.

    With exit_on_error (default) any non-2xx/3xx status prints an error and
    exits the process with code 2.
    '''
    token = zign.api.get_token('uid', ['uid'])
    headers = headers or {}
    headers['Authorization'] = 'Bearer {}'.format(token)
    user = config.get('user')
    if user:
        # act on behalf of the configured user
        headers['X-On-Behalf-Of'] = user
    url = urllib.parse.urljoin(config.get('deploy_api'), path)
    response = method(url, headers=headers, timeout=DEFAULT_HTTP_TIMEOUT, **kwargs)
    if exit_on_error and not (200 <= response.status_code < 400):
        error('Server returned HTTP error {} for {}:\n{}'.format(
            response.status_code, url, response.text))
        exit(2)
    return response
def inspect_contents(config, team, artifact, tag, url, output, limit):
    '''List image contents (files in tar layers)'''
    set_pierone_url(config, url)
    token = get_token()
    tags = get_tags(config.get('url'), team, artifact, token)
    if not tag:
        tag = [t['name'] for t in tags]
    CHUNK_SIZE = 8192
    TYPES = {b'5': 'D', b'0': ' '}
    rows = []
    for t in tag:
        manifest = request(config.get('url'),
                           '/v2/{}/{}/manifests/{}'.format(team, artifact, t),
                           token).json()
        # manifest schema v2 lists 'layers', schema v1 lists 'fsLayers'
        if manifest.get('layers'):
            layers = reversed([lay.get('digest') for lay in manifest.get('layers')])
        else:
            layers = [lay.get('blobSum') for lay in manifest.get('fsLayers')]
        if layers:
            found = 0
            for i, layer_id in enumerate(layers):
                if not layer_id:
                    continue
                response = request(config.get('url'),
                                   '/v2/{}/{}/blobs/{}'.format(team, artifact, layer_id),
                                   token)
                # stream the blob to a temp file, then list the tar members
                with tempfile.NamedTemporaryFile(prefix='tmp-layer-', suffix='.tar') as fd:
                    for chunk in response.iter_content(CHUNK_SIZE):
                        fd.write(chunk)
                    fd.flush()
                    with tarfile.open(fd.name) as archive:
                        has_member = False
                        for member in archive.getmembers():
                            rows.append({'layer_index': i,
                                         'layer_id': layer_id,
                                         'type': TYPES.get(member.type),
                                         'mode': oct(member.mode)[-4:],
                                         'name': member.name,
                                         'size': member.size,
                                         'created_time': member.mtime})
                            has_member = True
                if has_member:
                    found += 1
                # stop after `limit` non-empty layers
                if found >= limit:
                    break
    rows.sort(key=lambda row: (row['layer_index'], row['name']))
    with OutputFormat(output):
        print_table(['layer_index', 'layer_id', 'mode', 'name', 'size', 'created_time'],
                    rows,
                    titles={'created_time': 'Created', 'layer_index': 'Idx'},
                    max_column_widths={'layer_id': 16})
def cli(ctx):
    '''Load stored configuration; all subcommands except "configure" require it.'''
    config = stups_cli.config.load_config('github-maintainer-cli')
    emails = config.get('emails')
    token = config.get('github_access_token')
    subcommand = ctx.invoked_subcommand or 'x'
    # "configure" itself must be runnable without prior configuration
    if not 'configure'.startswith(subcommand):
        if not emails:
            raise click.UsageError(
                'No emails configured. Please run "configure".')
        if not token:
            raise click.UsageError(
                'No GitHub access token configured. Please run "configure".')
    ctx.obj = config
def pull_requests(config, output):
    '''List pull requests'''
    token = config.get('github_access_token')
    repositories = get_repositories()
    rows = []
    for issue in get_my_issues(token):
        pr = issue.get('pull_request')
        if not pr:
            continue
        repo = repositories.get(issue['repository']['url'])
        if not repo:
            # only show PRs for repositories we maintain
            continue
        r = request(session.get, pr['url'], token)
        issue.update(**r.json())
        issue['repository'] = repo['full_name']
        issue['created_time'] = parse_time(issue['created_at'])
        issue['created_by'] = issue['user']['login']
        issue['labels'] = ', '.join([label['name'] for label in issue['labels']])
        rows.append(issue)
    rows.sort(key=lambda x: (x['repository'], x['number']))
    with OutputFormat(output):
        print_table(['repository', 'number', 'title', 'labels', 'mergeable',
                     'mergeable_state', 'created_time', 'created_by'], rows)
def types(config, output):
    '''List violation types'''
    url = config.get('url')
    if not url:
        raise click.ClickException(
            'Missing configuration URL. Please run "stups configure".')
    token = get_token()
    r = request(url, '/api/violation-types', token)
    r.raise_for_status()
    rows = []
    for entry in r.json():
        entry['created_time'] = parse_time(entry['created'])
        rows.append(entry)
    rows.sort(key=lambda entry: entry['id'])
    with OutputFormat(output):
        print_table(['id', 'violation_severity', 'created_time', 'help_text'],
                    rows,
                    titles={'created_time': 'Created',
                            'violation_severity': 'Sev.'})
def wait_for_deployment(config, application, version, release, timeout, interval):
    '''Wait for all pods to become ready'''
    namespace = config.get('kubernetes_namespace')
    kubectl_login(config)
    deployment_name = '{}-{}-{}'.format(application, version, release)
    cutoff = time.time() + timeout
    while time.time() < cutoff:
        data = kubectl_get(
            namespace, 'pods', '-l',
            'application={},version={},release={}'.format(
                application, version, release))
        pods = data['items']

        def pod_ready(pod):
            # a pod counts as ready when it is Running and every reported
            # container status is ready
            if pod['status'].get('phase') != 'Running':
                return False
            return all(cont.get('ready')
                       for cont in pod['status'].get('containerStatuses', []))

        pods_ready = sum(1 for pod in pods if pod_ready(pod))
        if pods and pods_ready >= len(pods):
            return
        info('Waiting up to {:.0f} more secs for deployment '
             '{} ({}/{} pods ready)..'.format(cutoff - time.time(),
                                              deployment_name,
                                              pods_ready, len(pods)))
        time.sleep(interval)
    raise click.Abort()
def image(config, image, url, output):
    '''List tags that point to this image'''
    set_pierone_url(config, url)
    token = get_token()
    try:
        resp = request(config.get('url'), '/tags/{}'.format(image), token)
    except requests.HTTPError as exc:
        # NOTE: previously bound as "error", shadowing the module-level
        # error() helper within this scope; renamed to avoid confusion.
        status_code = exc.response.status_code
        if status_code == 404:
            click.echo('Image {} not found'.format(image))
        elif status_code == 412:
            click.echo('Prefix {} matches more than one image.'.format(image))
        else:
            raise exc
        return
    tags = resp.json()
    with OutputFormat(output):
        print_table(['team', 'artifact', 'name'], tags,
                    titles={'name': 'Tag', 'artifact': 'Artifact', 'team': 'Team'})
def list_clusters(args):
    '''Print a table of all "ready" clusters from the Cluster Registry.'''
    config = stups_cli.config.load_config(APP_NAME)
    cluster_registry = config.get('cluster_registry')
    if not cluster_registry:
        cluster_registry = fix_url(click.prompt('URL of Cluster Registry'))
    token = zign.api.get_token('kubectl', ['uid'])
    response = requests.get(
        '{}/kubernetes-clusters'.format(cluster_registry),
        params={'lifecycle_status': 'ready'},
        headers={'Authorization': 'Bearer {}'.format(token)},
        timeout=20)
    response.raise_for_status()
    rows = []
    for cluster in response.json()['items']:
        status = cluster.get('status', {})
        version = status.get('current_version', '')[:7]
        # flag clusters whose rollout to next_version is still in progress
        if status.get('next_version') and status.get('current_version') != status.get('next_version'):
            version += ' (updating)'
        cluster['version'] = version
        rows.append(cluster)
    rows.sort(key=lambda c: (c['alias'], c['id']))
    print_table('id alias environment channel version'.split(), rows)
def pull_requests(config, output):
    """List pull requests"""
    token = config.get("github_access_token")
    repositories = get_repositories()
    rows = []
    for issue in get_my_issues(token):
        pr = issue.get("pull_request")
        if not pr:
            continue
        repo = repositories.get(issue["repository"]["url"])
        if not repo:
            # only show PRs for repositories we maintain
            continue
        r = request(session.get, pr["url"], token)
        issue.update(**r.json())
        issue["repository"] = repo["full_name"]
        issue["created_time"] = parse_time(issue["created_at"])
        issue["created_by"] = issue["user"]["login"]
        issue["labels"] = ", ".join([label["name"] for label in issue["labels"]])
        rows.append(issue)
    rows.sort(key=lambda x: (x["repository"], x["number"]))
    with OutputFormat(output):
        print_table(
            ["repository", "number", "title", "labels", "mergeable",
             "mergeable_state", "created_time", "created_by"],
            rows,
        )
def cves(config, team, artifact, tag, url, output):
    '''List all CVE's found by Clair service for a specific artifact tag'''
    set_pierone_url(config, url)
    rows = []
    token = get_token()
    for artifact_tag in get_tags(config.get('url'), team, artifact, token):
        if artifact_tag['name'] != tag:
            continue
        installed_software = get_clair_features(artifact_tag.get('clair_details'), token)
        for software_pkg in installed_software:
            for cve in software_pkg.get('Vulnerabilities', []):
                fixed_by = cve.get('FixedBy')
                rows.append({
                    'cve': cve['Name'],
                    'severity': cve['Severity'].upper(),
                    'affected_feature': '{}:{}'.format(software_pkg['Name'],
                                                       software_pkg['Version']),
                    # only report a fixing feature when Clair knows a fix
                    'fixing_feature': fixed_by and '{}:{}'.format(software_pkg['Name'],
                                                                  fixed_by),
                    'link': cve['Link'],
                })
    # order rows from most to least severe
    severity_rating = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'NEGLIGIBLE', 'UNKNOWN', 'PENDING']
    rows.sort(key=lambda row: severity_rating.index(row['severity']))
    with OutputFormat(output):
        titles = {
            'cve': 'CVE',
            'severity': 'Severity',
            'affected_feature': 'Affected Feature',
            'fixing_feature': 'Fixing Feature',
            'link': 'Link'
        }
        print_table(['cve', 'severity', 'affected_feature', 'fixing_feature', 'link'],
                    rows, titles=titles, styles=CVE_STYLES)
def tags(config, team: str, artifact, url, output, limit):
    '''List all tags for a given team'''
    set_pierone_url(config, url)
    token = get_token()
    if limit is None:
        # show 20 rows if artifact was given, else show only 3
        limit = 20 if artifact else 3
    if not artifact:
        artifact = get_artifacts(config.get('url'), team, token)
        if not artifact:
            raise click.UsageError('The Team you are looking for does not exist or '
                                   'we could not find any artifacts registered in Pierone! '
                                   'Please double check for spelling mistakes.')
    # the Docker registry host is the Pier One URL without the scheme
    registry = config.get('url')
    if registry.startswith('https://'):
        registry = registry[8:]
    slice_from = -limit
    rows = []
    for art in artifact:
        image = DockerImage(registry=registry, team=team, artifact=art, tag=None)
        try:
            image_tags = get_image_tags(image, token)
        except Unauthorized as e:
            raise click.ClickException(str(e))
        if image_tags is None:
            raise click.UsageError('Artifact or Team does not exist! '
                                   'Please double check for spelling mistakes.')
        rows.extend(image_tags[slice_from:])
    # sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service)
    rows.sort(key=lambda row: (row['team'], row['artifact']))
    with OutputFormat(output):
        titles = {
            'created_time': 'Created',
            'created_by': 'By',
            'severity_fix_available': 'Fixable CVE Severity',
            'severity_no_fix_available': 'Unfixable CVE Severity'
        }
        print_table(['team', 'artifact', 'tag', 'created_time', 'created_by',
                     'severity_fix_available', 'severity_no_fix_available'],
                    rows, titles=titles, styles=CVE_STYLES)
def get_current_replicas(config, application):
    '''Get current total number of replicas for given application'''
    namespace = config.get('kubernetes_namespace')
    data = kubectl_get(namespace, 'deployments', '-l',
                       'application={}'.format(application))
    # sum replica counts over every deployment of the application
    total = sum(deployment.get('status', {}).get('replicas', 0)
                for deployment in data['items'])
    print(total)
def artifacts(config, team, output):
    '''List all team artifacts'''
    token = get_token()
    names = get_artifacts(config.get('url'), team, token)
    rows = [{'team': team, 'artifact': name} for name in sorted(names)]
    with OutputFormat(output):
        print_table(['team', 'artifact'], rows)
def teams(config, output):
    '''List all teams having artifacts in Pier One'''
    token = get_token()
    response = request(config.get('url'), '/teams', token)
    rows = [{'name': team_name} for team_name in sorted(response.json())]
    with OutputFormat(output):
        print_table(['name'], rows)
def resolve_violations(config, comment, since, region, meta, remeta, limit, violation_ids, **kwargs):
    '''Resolve violations'''
    url = config.get('url')
    if not url:
        raise click.ClickException('Missing configuration URL. Please run "stups configure".')
    kwargs['accounts'] = kwargs.get('accounts') or config.get('accounts')
    # refuse to resolve with no filter at all
    if not any([violation_ids, kwargs['accounts'], kwargs['type'], region]):
        raise click.UsageError('At least one of --accounts, --type, --region or --violation-ids must be specified')
    token = get_token()
    params = {'size': limit,
              'sort': 'id,DESC',
              'from': parse_since(since),
              'application-ids': kwargs.get('applications'),
              'application-version-ids': kwargs.get('application_versions')}
    params.update(kwargs)
    if violation_ids:
        # fetch each requested violation individually
        content = []
        for violation_id in violation_ids.split(','):
            r = request(url, '/api/violations/{}'.format(violation_id), token, params=params)
            r.raise_for_status()
            content.append(r.json())
        data = {'content': content}
    else:
        r = request(url, '/api/violations', token, params=params)
        r.raise_for_status()
        data = r.json()
    for row in data['content']:
        if region and row['region'] != region:
            continue
        if meta and not meta_matches(row['meta_info'], meta):
            continue
        if remeta and not meta_matches_re(format_meta_info(row['meta_info']), remeta):
            continue
        if row['comment']:
            # already resolved, skip
            continue
        with Action('Resolving violation {}/{} {} {}..'.format(
                row['account_id'], row['region'], row['violation_type']['id'], row['id'])):
            r = session.post(url + '/api/violations/{}/resolution'.format(row['id']),
                             data=comment,
                             headers={'Authorization': 'Bearer {}'.format(token)})
            r.raise_for_status()
def apply_autoscaling(config, template, application, version, release, parameter, execute):
    '''Apply Horizontal Pod Autoscaling to current deployment'''
    context = parse_parameters(parameter)
    # the rendered template needs the deployment coordinates in its context
    context.update(application=application, version=version, release=release)
    data = _render_template(template, context)
    cluster_id = config.get('kubernetes_cluster')
    namespace = config.get('kubernetes_namespace')
    path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(cluster_id, namespace)
    response = request(config, requests.post, path, json=data)
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def artifacts(config, team, url, output):
    '''List all team artifacts'''
    set_pierone_url(config, url)
    token = get_token()
    names = get_artifacts(config.get('url'), team, token)
    rows = [{'team': team, 'artifact': name} for name in sorted(names)]
    with OutputFormat(output):
        print_table(['team', 'artifact'], rows)
def teams(config, output, url):
    '''List all teams having artifacts in Pier One'''
    set_pierone_url(config, url)
    token = get_token()
    response = request(config.get('url'), '/teams', token)
    rows = [{'name': team_name} for team_name in sorted(response.json())]
    with OutputFormat(output):
        print_table(['name'], rows)
def resolve_violations(config, comment, since, region, meta, remeta, limit, violation_ids, **kwargs):
    '''Resolve violations

    Marks matching violations as resolved by POSTing ``comment`` as the
    resolution. Violations are selected either by an explicit comma-separated
    list of IDs (``violation_ids``) or by filter criteria passed through
    ``kwargs`` (accounts, type, ...); additional client-side filters
    (region, meta, remeta) are applied after fetching.
    '''
    url = config.get('url')
    if not url:
        raise click.ClickException('Missing configuration URL. Please run "stups configure".')
    # fall back to accounts from the config file when not given on the command line
    kwargs['accounts'] = kwargs.get('accounts') or config.get('accounts')
    # refuse to resolve "everything": at least one selector must narrow the set
    if all([not violation_ids, not kwargs['accounts'], not kwargs['type'], not region]):
        raise click.UsageError('At least one of --accounts, --type, --region or --violation-ids must be specified')
    token = get_token()
    params = {'size': limit, 'sort': 'id,DESC'}
    params['from'] = parse_since(since)
    params.update(kwargs)
    data = {}
    if violation_ids:
        # fetch each requested violation individually by ID
        data['content'] = []
        for violation_id in violation_ids.split(','):
            r = request(url, '/api/violations/{}'.format(violation_id), token, params=params)
            r.raise_for_status()
            data['content'].append(r.json())
    else:
        # fetch one page (up to "limit") of violations matching the filters
        r = request(url, '/api/violations', token, params=params)
        r.raise_for_status()
        data = r.json()
    for row in data['content']:
        # client-side filters that the API query above does not cover
        if region and row['region'] != region:
            continue
        if meta and not meta_matches(row['meta_info'], meta):
            continue
        if remeta and not meta_matches_re(format_meta_info(row['meta_info']), remeta):
            continue
        if row['comment']:
            # already resolved, skip
            continue
        with Action('Resolving violation {}/{} {} {}..'.format(row['account_id'], row['region'], row['violation_type']['id'], row['id'])):
            r = session.post(url + '/api/violations/{}/resolution'.format(row['id']), data=comment,
                             headers={'Authorization': 'Bearer {}'.format(token)})
            r.raise_for_status()
def promote_deployment(config, application, version, release, stage, execute):
    '''Promote deployment to new stage

    Patches the "stage" label of the deployment via the cluster API and
    either executes the resulting change request or prints its ID.
    '''
    # fix: the namespace was read from config twice; read it once
    namespace = config.get('kubernetes_namespace')
    deployment_name = '{}-{}-{}'.format(application, version, release)
    info('Promoting deployment {} to {} stage..'.format(deployment_name, stage))
    cluster_id = config.get('kubernetes_cluster')
    path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(cluster_id, namespace)
    resources_update = ResourcesUpdate()
    resources_update.set_label(deployment_name, 'stage', stage)
    response = request(config, requests.patch, path, json=resources_update.to_dict())
    change_request_id = response.json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def encrypt(config):
    '''Encrypt plain text (read from stdin) for deployment configuration'''
    # read everything from stdin; the deploy API returns the ciphertext
    plain_text = sys.stdin.read()
    secrets_url = '{}/secrets'.format(config.get('deploy_api'))
    response = request(config, requests.post, secrets_url, json={'plaintext': plain_text})
    print("deployment-secret:{}".format(response.json()['data']))
def latest(config, team, artifact, output):
    '''Get latest tag/version of a specific artifact'''
    get_token()  # validate that the token exists before talking to the registry
    registry_host = config.get('url')
    # DockerImage wants a bare host, so strip the https:// scheme if present
    if registry_host.startswith('https://'):
        registry_host = registry_host[len('https://'):]
    docker_image = DockerImage(registry=registry_host, team=team, artifact=artifact, tag=None)
    print(get_latest_tag('pierone', docker_image))
def tags(config, team, artifact, output):
    '''List all tags'''
    access_token = get_token()
    if not artifact:
        # no artifact given: list tags for every artifact of the team
        artifact = get_artifacts(config.get('url'), team, access_token)
    rows = []
    for artifact_name in artifact:
        for entry in get_tags(config.get('url'), team, artifact_name, access_token):
            rows.append({'team': team,
                         'artifact': artifact_name,
                         'tag': entry['name'],
                         'created_by': entry['created_by'],
                         'created_time': parse_time(entry['created'])})
    rows.sort(key=lambda entry: (entry['team'], entry['artifact'], entry['tag']))
    with OutputFormat(output):
        print_table(['team', 'artifact', 'tag', 'created_time', 'created_by'],
                    rows, titles={'created_time': 'Created', 'created_by': 'By'})
def tags(config, team: str, artifact, url, output, limit):
    '''List all tags for a given team'''
    set_pierone_url(config, url)
    access_token = get_token()
    if limit is None:
        # show 20 rows if artifact was given, else show only 3
        limit = 20 if artifact else 3
    if not artifact:
        artifact = get_artifacts(config.get('url'), team, access_token)
    rows = []
    for artifact_name in artifact:
        tag_entries = get_tags(config.get('url'), team, artifact_name, access_token)
        # keep only the last "limit" entries as returned by the service
        for entry in tag_entries[-limit:]:
            rows.append({'team': team,
                         'artifact': artifact_name,
                         'tag': entry['name'],
                         'created_by': entry['created_by'],
                         'created_time': parse_time(entry['created']),
                         'severity_fix_available': parse_severity(
                             entry.get('severity_fix_available'), entry.get('clair_id', False)),
                         'severity_no_fix_available': parse_severity(
                             entry.get('severity_no_fix_available'), entry.get('clair_id', False))})
    # sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service)
    rows.sort(key=lambda entry: (entry['team'], entry['artifact']))
    with OutputFormat(output):
        titles = {
            'created_time': 'Created',
            'created_by': 'By',
            'severity_fix_available': 'Fixable CVE Severity',
            'severity_no_fix_available': 'Unfixable CVE Severity'
        }
        print_table(['team', 'artifact', 'tag', 'created_time', 'created_by',
                     'severity_fix_available', 'severity_no_fix_available'],
                    rows, titles=titles, styles=CVE_STYLES)
def get_named_token(scope, realm, name, user, password, url=None, insecure=False, refresh=False, use_keyring=True, prompt=False):
    '''get named access token, return existing if still valid

    Optionally prompts for the token service URL and the user's password
    when ``prompt`` is true.  On success the working password is stored in
    the keyring (unless ``use_keyring`` is false) and the token is stored
    under ``name`` for later reuse.
    '''
    # reuse a cached token unless the caller explicitly asked for a refresh
    if name and not refresh:
        existing_token = get_existing_token(name)
        if existing_token:
            return existing_token
    config = get_config()
    url = url or config.get('url')
    while not url and prompt:
        url = click.prompt('Please enter the OAuth access token service URL')
        if not url.startswith('http'):
            # convenience: allow entering a bare hostname
            url = 'https://{}'.format(url)
        try:
            # reachability check only; the response body is not inspected
            requests.get(url, timeout=5, verify=not insecure)
        except requests.RequestException:
            # fix: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit during the network probe
            error('Could not reach {}'.format(url))
            url = None
        config['url'] = url
    stups_cli.config.store_config(config, 'zign')
    password = password or keyring.get_password(KEYRING_KEY, user)
    while True:
        if not password and prompt:
            password = click.prompt('Password for {}'.format(user), hide_input=True)
        try:
            result = get_new_token(realm, scope, user, password, url=url, insecure=insecure)
            break
        except AuthenticationFailed as e:
            if prompt:
                # wrong credentials: report and ask again
                error(e)
                info('Please check your username and password and try again.')
                password = None
            else:
                raise
    if result and use_keyring:
        # remember the working password for the next invocation
        keyring.set_password(KEYRING_KEY, user, password)
    if name:
        store_token(name, result)
    return result
def get_repositories():
    """Return repositories (keyed by URL) maintained by one of the configured emails.

    Robustness fix: a missing "emails" config entry previously yielded None,
    making the "email in my_emails" membership test raise TypeError.
    """
    config = stups_cli.config.load_config("github-maintainer-cli")
    my_emails = config.get("emails") or []
    my_repos = {}
    for url, repo in get_all_repositories().items():
        for maintainer in repo["maintainers"]:
            # maintainer entries look like "Name <email>"; keep only the email
            _name, _, email = maintainer.strip().partition("<")
            email = email.strip().rstrip(">")
            if email in my_emails:
                my_repos[url] = repo
    return my_repos
def get_repositories():
    # Collect every repository whose maintainer list contains one of our emails.
    config = stups_cli.config.load_config('github-maintainer-cli')
    my_emails = config.get('emails')
    matching_repos = {}
    for repo_url, repo in get_all_repositories().items():
        for maintainer in repo['maintainers']:
            # maintainer entries look like "Name <email>"; extract the email part
            _, _, email_part = maintainer.strip().partition('<')
            if email_part.strip().rstrip('>') in my_emails:
                matching_repos[repo_url] = repo
    return matching_repos
def get_token(name: str, scopes: list):
    '''Get an OAuth token, either from Token Service
    or directly from OAuth provider (using the Python tokens library)

    Lookup order: cached token -> "tokens" library -> Token Service
    (username/password). Raises ConfigurationError when username or
    service URL are missing for the Token Service fallback.
    '''
    # first try if a token exists already
    token = get_existing_token(name)
    if token:
        return token['access_token']
    # next try the "tokens" library (client credentials on disk)
    tokens.manage(name, scopes)
    try:
        access_token = tokens.get(name)
    except (tokens.ConfigurationError, tokens.InvalidCredentialsError):
        # fix: merged two identical handlers into one tuple clause;
        # fall through to the Token Service below
        access_token = None
    if access_token:
        return access_token
    # finally fall back to the Token Service with username/password
    config = get_config()
    user = config.get('user') or os.getenv('ZIGN_USER') or os.getenv('USER')
    if not user:
        raise ConfigurationError('Missing OAuth username. ' +
                                 'Either set "user" in configuration file or ZIGN_USER environment variable.')
    if not config.get('url'):
        raise ConfigurationError('Missing OAuth access token service URL. ' +
                                 'Please set "url" in configuration file.')
    password = os.getenv('ZIGN_PASSWORD') or keyring.get_password(KEYRING_KEY, user)
    token = get_new_token(config.get('realm'), scopes, user, password,
                          url=config.get('url'), insecure=config.get('insecure'))
    if token:
        store_token(name, token)
    return token['access_token']
def latest(config, team, artifact, url, output):
    '''Get latest tag/version of a specific artifact'''
    set_pierone_url(config, url)
    access_token = get_token()  # also validates that a token exists
    docker_image = DockerImage(registry=get_registry(config.get('url')),
                               team=team, artifact=artifact, tag=None)
    tag = get_latest_tag(docker_image, access_token)
    if not tag:
        raise PieroneException('Latest tag not found')
    print(tag)
def _scale_deployment(config, name, namespace, replicas, execute):
    '''Scale a single deployment'''
    info('Scaling deployment {} to {} replicas..'.format(name, replicas))
    # build the replica-count patch for the deployment
    update = ResourcesUpdate()
    update.set_number_of_replicas(name, replicas)
    resource_path = '/kubernetes-clusters/{}/namespaces/{}/resources'.format(
        config.get('kubernetes_cluster'), namespace)
    change_request_id = request(config, requests.patch, resource_path,
                                json=update.to_dict()).json()['id']
    if execute:
        approve_and_execute(config, change_request_id)
    else:
        print(change_request_id)
def repositories(config):
    '''List repositories'''
    token = config.get('github_access_token')
    repositories = get_repositories()
    # enrich each repo with open issue / pull request counters
    for issue in get_my_issues(token):
        repo = repositories.get(issue['repository']['url'])
        if not repo:
            continue
        repo['open_issues'] = repo.get('open_issues', 0) + 1
        if issue.get('pull_request'):
            repo['open_pull_requests'] = repo.get('open_pull_requests', 0) + 1
    rows = [repo for _, repo in sorted(repositories.items())]
    print_table(['full_name', 'stargazers_count', 'forks_count', 'open_issues', 'open_pull_requests'], rows)
def issues(config):
    '''List open issues'''
    token = config.get('github_access_token')
    repositories = get_repositories()
    rows = []
    for issue in get_my_issues(token):
        # pull requests also show up as issues; skip them here
        if issue.get('pull_request'):
            continue
        repo = repositories.get(issue['repository']['url'])
        if not repo:
            continue
        issue['repository'] = repo['full_name']
        issue['created_time'] = parse_time(issue['created_at'])
        issue['created_by'] = issue['user']['login']
        issue['labels'] = ', '.join([label['name'] for label in issue['labels']])
        rows.append(issue)
    rows.sort(key=lambda row: (row['repository'], row['number']))
    print_table(['repository', 'number', 'title', 'labels', 'created_time', 'created_by'], rows)
def set_pierone_url(config: dict, url: str) -> str:
    '''Read Pier One URL from cli, from config file or from stdin.

    Stores the resulting URL back into ``config['url']`` and returns it.
    '''
    url = url or config.get('url')
    while not url:
        url = click.prompt('Please enter the Pier One URL', type=UrlType())
        try:
            # reachability check only; the response body is not inspected
            requests.get(url, timeout=5)
        except Exception:
            # fix: was a bare "except:" which also swallowed
            # KeyboardInterrupt/SystemExit during the network probe
            error('Could not reach {}'.format(url))
            url = None
    if '://' not in url:
        # issue 63: gracefully handle URLs without scheme
        url = 'https://{}'.format(url)
    config['url'] = url
    return url
def issues(config, output):
    """List open issues"""
    token = config.get("github_access_token")
    repositories = get_repositories()
    rows = []
    for issue in get_my_issues(token):
        # pull requests also show up as issues; skip them here
        if issue.get("pull_request"):
            continue
        repo = repositories.get(issue["repository"]["url"])
        if not repo:
            continue
        issue["repository"] = repo["full_name"]
        issue["created_time"] = parse_time(issue["created_at"])
        issue["created_by"] = issue["user"]["login"]
        issue["labels"] = ", ".join([label["name"] for label in issue["labels"]])
        rows.append(issue)
    rows.sort(key=lambda row: (row["repository"], row["number"]))
    with OutputFormat(output):
        print_table(["repository", "number", "title", "labels", "created_time", "created_by"], rows)
def image(config, image, output):
    '''List tags that point to this image'''
    access_token = get_token()
    response = request(config.get('url'), '/tags/{}'.format(image), access_token)
    # map known error status codes to their user-facing messages
    error_messages = {
        404: 'Image {} not found'.format(image),
        412: 'Prefix {} matches more than one image.'.format(image),
    }
    if response.status_code in error_messages:
        click.echo(error_messages[response.status_code])
        return
    matching_tags = response.json()
    with OutputFormat(output):
        print_table(['team', 'artifact', 'name'], matching_tags,
                    titles={'name': 'Tag', 'artifact': 'Artifact', 'team': 'Team'})
def types(config, output):
    '''List violation types'''
    url = config.get('url')
    if not url:
        raise click.ClickException('Missing configuration URL. Please run "stups configure".')
    token = get_token()
    response = request(url, '/api/violation-types', token)
    response.raise_for_status()
    rows = []
    for entry in response.json():
        # add a parsed timestamp column for display
        entry['created_time'] = parse_time(entry['created'])
        rows.append(entry)
    rows.sort(key=lambda entry: entry['id'])
    with OutputFormat(output):
        print_table(['id', 'violation_severity', 'created_time', 'help_text'], rows,
                    titles={'created_time': 'Created', 'violation_severity': 'Sev.'})
def get_new_token(realm: str, scope: list, user, password, url=None, insecure=False):
    '''Request a fresh access token from the Token Service.

    Raises AuthenticationFailed on HTTP 401 and ServerError for any other
    non-200 response or an unparsable/incomplete JSON body.
    '''
    if not url:
        url = get_config().get('url')
    params = {'json': 'true'}
    if realm:
        params['realm'] = realm
    if scope:
        params['scope'] = ' '.join(scope)
    response = requests.get(url, params=params, auth=(user, password), verify=not insecure)
    if response.status_code == 401:
        raise AuthenticationFailed('Token Service returned {}'.format(response.text))
    elif response.status_code != 200:
        raise ServerError('Token Service returned HTTP status {}: {}'.format(response.status_code, response.text))
    try:
        json_data = response.json()
    except ValueError:
        # fix: was a bare "except:"; requests raises ValueError
        # (json.JSONDecodeError) for a malformed JSON body
        raise ServerError('Token Service returned invalid JSON data')
    if not json_data.get('access_token'):
        raise ServerError('Token Service returned invalid JSON (access_token missing)')
    return json_data
def image(config, image, url, output):
    '''List tags that point to this image'''
    set_pierone_url(config, url)
    access_token = get_token()
    try:
        response = request(config.get('url'), '/tags/{}'.format(image), access_token)
    except requests.HTTPError as http_error:
        # translate known error status codes into friendly messages
        code = http_error.response.status_code
        if code == 404:
            click.echo('Image {} not found'.format(image))
        elif code == 412:
            click.echo('Prefix {} matches more than one image.'.format(image))
        else:
            raise http_error
        return
    with OutputFormat(output):
        print_table(['team', 'artifact', 'name'], response.json(),
                    titles={'name': 'Tag', 'artifact': 'Artifact', 'team': 'Team'})
def repositories(config, show_issues, output):
    """List repositories"""
    token = config.get("github_access_token")
    repositories = get_repositories()
    if show_issues:
        # enrich each repo with open issue / pull request counters
        for issue in get_my_issues(token):
            repo = repositories.get(issue["repository"]["url"])
            if not repo:
                continue
            repo["open_issues"] = repo.get("open_issues", 0) + 1
            if issue.get("pull_request"):
                repo["open_pull_requests"] = repo.get("open_pull_requests", 0) + 1
    rows = [repo for _, repo in sorted(repositories.items())]
    with OutputFormat(output):
        columns = ["full_name", "stargazers_count", "forks_count"]
        if show_issues:
            columns += ["open_issues", "open_pull_requests"]
        print_table(columns, rows)