def aws_assume_role(self, aws_role, aws_account_id):
    """Assume an AWS IAM role by running the assume-role helper container.

    Parameters
    ----------
    aws_role : str
        Name of the role to assume.
    aws_account_id : str
        Account id the role lives in.

    Returns
    -------
    dict
        Environment variables parsed from the container output; also cached
        on ``self.env_assume``.
    """
    click.echo('Assuming role %s at %s' % (aws_role, aws_account_id))
    docker = Container()
    aws_image = Image().get_image('aws')
    # Seed with the role request, then layer the authenticated session on
    # top — values from self.env_auth deliberately win on key collisions.
    role_envs = {
        'AWS_ROLE': aws_role,
        'AWS_ACCOUNT_ID': aws_account_id,
    }
    role_envs.update(self.env_auth)
    output = docker.create(
        image=aws_image,
        entrypoint='/bin/bash -c',
        command='assume-role.sh',
        volumes=['.:/work'],
        environment=role_envs,
        tty=False,
        stdin_open=False)
    # Normalize line endings before handing the output to the env parser.
    self.env_assume = parse_env('\n'.join(output.splitlines()))
    return self.env_assume
import click
from one.one import cli
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
from one.utils.config import get_config_value

container = Container()
image = Image()
environment = EnvironmentAws()
AWS_IMAGE = image.get_image('aws')
KUBE_TOOLS_IMAGE = 'dnxsolutions/docker-kube-tools:0.3.2'


def __init__():
    """Register the kube plugin commands on the root CLI group."""
    cli.add_command(kubectl)
    cli.add_command(helm)
    cli.add_command(kube_shell)
    cli.add_command(kube_proxy)


def get_kube_config(aws_default_region, cluster_name, envs):
    """Write a kubeconfig for *cluster_name* via `aws eks update-kubeconfig`.

    The kubeconfig path can be overridden through the
    ``plugins.kube.parameters.kubeconfig`` config key; it defaults to
    ``/work/.kube-config`` (inside the mounted work dir).
    """
    configured_path = get_config_value('plugins.kube.parameters.kubeconfig', '')
    kubeconfig = configured_path or '/work/.kube-config'
    command = 'eks --region %s update-kubeconfig --name %s --kubeconfig %s' % (
        aws_default_region, cluster_name, kubeconfig)
    container.create(
        image=AWS_IMAGE,
        command=command,
        volumes=['.:/work'],
        environment=envs)
import click
from PyInquirer import prompt
from one.utils.environment.common import get_credentials_file, get_config_file, get_idp_file, write_config
from one.utils.prompt import style
from one.docker.image import Image
from one.docker.container import Container
from one.__init__ import CLI_ROOT
from one.prompt.idp import PROVIDER_QUESTIONS, GSUITE_QUESTIONS, AZURE_QUESTIONS, OKTA_QUESTIONS
from one.prompt.auth import AWS_ACCESS_KEY_QUESTIONS

image = Image()
container = Container()


def configure_idp():
    """Prompt for an identity provider and dispatch to its configure flow.

    Raises ``SystemExit`` when the prompt is cancelled or the selected
    provider is not recognized.
    """
    provider_answer = prompt(PROVIDER_QUESTIONS, style=style)
    if not provider_answer:
        raise SystemExit
    # Map each prompt choice to its dedicated configuration routine.
    handlers = {
        'Google G Suite SSO': configure_gsuite,
        'Microsoft Azure SSO': configure_azure,
        'Okta SSO': configure_okta,
        'AWS SSO': configure_aws_sso,
        'AWS IAM user': configure_iam_user,
    }
    handler = handlers.get(provider_answer['provider'])
    if handler is None:
        raise SystemExit
    handler()
import click
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
from one.utils.config import get_config_value, get_workspace_value
from one.utils.app.common import app_deploy_factory, app_registry_factory

image = Image()
container = Container()
environment = EnvironmentAws()
# Images are resolved once at import time and shared by all commands below.
ECS_DEPLOY_IMAGE = image.get_image('ecs_deploy')
AWS_IMAGE = image.get_image('aws')


# Root group for the `app` subcommands.
@click.group(help='Group of app commands wrapped inside docker.')
def app():
    pass


# Builds the application docker image through the configured registry helper.
@app.command(name='docker-build', help='Build docker image for deployment.')
@click.option('--build-version', default='latest', help='Build version, used as tag for docker image.')
def docker_build(build_version):
    # Registry type defaults to 'ecr' when not set in the project config.
    app_registry = app_registry_factory(
        get_config_value('app.docker.registry_type', 'ecr'))
    app_registry.docker_build(build_version)


# NOTE(review): the `docker-login` command body continues beyond this chunk —
# only its decorator is visible here.
@app.command(name='docker-login', help='Login into docker registry.')
import click
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws

container = Container()
image = Image()
environment = EnvironmentAws()
AWS_IMAGE = image.get_image('aws')
AWS_V2_IMAGE = image.get_image('aws_v2')


# Pass-through alias: forwards all arguments to the AWS CLI running in docker.
@click.command(help='AWS CLI alias.')
@click.argument('args', nargs=-1)
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
def aws(args, workspace, aws_role):
    # Resolve credentials/env vars for the selected workspace and role.
    envs = environment.build(workspace, aws_role).get_env()
    # Re-join the argument tuple into a single command string
    # (the trailing space is harmless to the CLI).
    command = ''
    for arg in args:
        command += '%s ' % (arg)
    # NOTE(review): this call continues beyond the visible chunk.
    container.create(image=AWS_IMAGE,
import click
import os
from one.utils.config import get_config_value, get_workspace_value
from one.docker.container import Container
from one.docker.image import Image
from one.utils.app import App

ECS_DEPLOY_IMAGE = Image().get_image('ecs_deploy')
container = Container()


class AppDeployEcs(App):
    """ECS deployment strategy used by the `app deploy` command."""

    def __init__(self):
        super().__init__()

    def deploy(self, environment, workspace, image_name):
        # Base variables required by the ECS deploy container; values come
        # from the project config and the selected workspace.
        env_deploy = {
            'AWS_DEFAULT_REGION': get_workspace_value(workspace, 'aws.region', '', True),
            'APP_NAME': get_config_value('app.name'),
            'CLUSTER_NAME': get_workspace_value(workspace, 'ecs_cluster_name', '', True),
            'CONTAINER_PORT': get_config_value('app.port'),
            'IMAGE_NAME': image_name,
        }
        # Workspace-level environment entries are merged on top, so they
        # override the base values on key collisions.
        workspace_environments = get_workspace_value(workspace, 'environment', [])
        for env_dict in workspace_environments:
            env_deploy = {**env_deploy, **env_dict}
        ecs_task_definition_file = get_config_value('app.ecs_task_definition_file', 'task-definition.tpl.json')
        # NOTE(review): the method continues beyond the visible chunk.
        if not os.path.isfile(ecs_task_definition_file):
import click
import yaml
from one.__init__ import CONFIG_FILE
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
from one.one import cli

container = Container()
image = Image()
SHELL_IMAGE = image.get_image('shell')


def make_callback(image, command, ports, entrypoint, volumes, environment):
    """Build a zero-argument callback that runs *command* inside docker.

    The returned closure resolves the default-workspace AWS secrets at call
    time, merges them into *environment* (mutating the mapping passed in),
    and then launches the container.
    """
    def callback():
        # Secrets are fetched lazily — only when the command actually runs.
        aws_envs = EnvironmentAws().build(
            workspace='default',
            aws_account_id='none',
            aws_role='none',
            aws_assume_role='false').get_env()
        environment.update(aws_envs)
        container.create(
            image=image,
            command=command,
            ports=ports,
            entrypoint=entrypoint,
            volumes=volumes,
            environment=environment)
    return callback
import click
import os
from one.utils.config import get_workspace_value, get_config_value
from one.docker.container import Container
from one.docker.image import Image
from one.utils.app import App

AWS_IMAGE = Image().get_image('aws')
container = Container()


class AppDeployStatic(App):
    """Static-asset deployment strategy (S3 bucket + CloudFront distribution)."""

    def __init__(self):
        super().__init__()

    def deploy(self, environments):
        # Workspace name travels inside the prepared environment mapping.
        workspace = environments.get('WORKSPACE', 'default')
        env_deploy = {
            'AWS_DEFAULT_REGION': get_workspace_value(workspace, 'aws.region', '', True)
        }
        environments.update(env_deploy)
        # App-level config wins; fall back to the workspace-scoped value.
        s3_bucket_name = (get_config_value('app.s3_bucket', '')
                          or get_workspace_value(workspace, 'app.s3_bucket', '', True))
        distribution_id = (get_config_value('app.distribution_id', '')
                           or get_workspace_value(
                               workspace, 'app.distribution_id', '', True))
        # NOTE(review): the method continues beyond the visible chunk.
        src_dir = (get_config_value('app.src', '')
def create(self, image='', command=None, entrypoint=None, volumes=None, ports=None,
           working_dir='/work', stdin_open=True, tty=True, environment=None):
    """Create, run and clean up a docker container, returning its logs.

    Parameters
    ----------
    image : str
        Image reference; validated (and pulled if needed) via
        ``Image().check_image``.
    command, entrypoint : str or None
        Passed straight through to the docker client.
    volumes : list[str] or None
        ``host:container`` bind specs. A host path starting with ``.`` is
        expanded relative to the current working directory.
    ports : list[str] or None
        ``host:container`` port pairs; a mistyped entry (missing ``:``)
        prints an error and aborts the run, returning ``None``.
    working_dir : str
        Working directory inside the container.
    stdin_open, tty : bool
        Interactive flags. With ``tty=True`` the container is attached via
        dockerpty; otherwise it runs detached and is waited on.
    environment : dict or None
        Environment variables for the container.

    Returns
    -------
    str or None
        The container logs decoded as UTF-8, or ``None`` when port parsing
        fails before the container is created.
    """
    # None sentinels instead of mutable default arguments ([]/{}) so the
    # defaults cannot be shared/mutated across calls.
    volumes = [] if volumes is None else volumes
    ports = [] if ports is None else ports
    environment = {} if environment is None else environment

    Image().check_image(image)

    container_volumes = []
    binds = []
    for volume in volumes:
        volume_parts = volume.split(':')
        # Expand a '.'-prefixed host path to an absolute one; startswith is
        # safe on an empty host component, unlike indexing [0].
        if volume_parts[0].startswith('.'):
            volume_parts[0] = os.getcwd() + volume_parts[0][1:]
        container_volumes.append(volume_parts[1])
        binds.append(':'.join(volume_parts))

    port_bindings = {}
    container_ports = []
    try:
        for port in ports:
            port_parts = port.split(':')
            port_bindings[port_parts[0]] = port_parts[1]
            container_ports.append(port_parts[0])
    except IndexError:
        # A pair without ':' has no port_parts[1] — report and bail out.
        click.echo(click.style('ERROR ', fg='red') + 'Ports mistyped.\n')
        return
    except Exception:
        click.echo(
            click.style('ERROR ', fg='red') + 'Unexpected error while loading ports.\n')
        raise

    host_config = client.create_host_config(binds=binds, port_bindings=port_bindings)
    container = client.create_container(
        image,
        command=command,
        entrypoint=entrypoint,
        stdin_open=stdin_open,
        tty=tty,
        ports=container_ports,
        environment=environment,
        working_dir=working_dir,
        volumes=container_volumes,
        host_config=host_config)

    if tty:
        # Interactive run: dockerpty attaches and blocks until exit.
        dockerpty.start(client, container)
    else:
        # Detached run: start and block until the container finishes.
        client.start(container=container.get('Id'))
        client.wait(container=container.get('Id'))

    # Collect logs and remove the container on BOTH paths so interactive
    # runs do not leak stopped containers.
    logs = client.logs(container['Id'])
    client.remove_container(container)
    return logs.decode('utf8')
import click
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
from one.utils.terraform_modules import terraform_modules_check
from one.utils.config import get_config_value, str2bool

image = Image()
container = Container()
environment = EnvironmentAws()
# Terraform image resolved once at import time and reused by every command.
TERRAFORM_IMAGE = image.get_image('terraform')


# Root group for the terraform subcommands registered below.
@click.group(help='Group of terraform commands wrapped inside docker.')
def terraform():
    pass


# NOTE(review): the `init` command is cut mid-decorator — its remaining
# options and body continue beyond this chunk.
@terraform.command(help='Run terraform init inside the docker container.')
@click.option('-w', '--workspace', default=None, type=str, help='Workspace to use.')
@click.option('-r', '--aws-role', 'aws_role', default=None, type=str, help='AWS role to use.')
@click.option('-c',