def deploy(self, environment, workspace, image_name):
    """Deploy *image_name* to the ECS cluster configured for *workspace*.

    Builds the deploy environment from global config plus workspace-level
    overrides and runs the ECS deploy container against the task
    definition template. Exits the process when the template is missing.
    """
    # Base deploy variables from the global config and the workspace section.
    deploy_env = {
        'AWS_DEFAULT_REGION': get_workspace_value(workspace, 'aws.region', '', True),
        'APP_NAME': get_config_value('app.name'),
        'CLUSTER_NAME': get_workspace_value(workspace, 'ecs_cluster_name', '', True),
        'CONTAINER_PORT': get_config_value('app.port'),
        'IMAGE_NAME': image_name,
    }

    # Workspace-level environment entries override the base values.
    for extra in get_workspace_value(workspace, 'environment', []):
        deploy_env.update(extra)

    task_definition = get_config_value('app.ecs_task_definition_file',
                                       'task-definition.tpl.json')
    if not os.path.isfile(task_definition):
        click.echo('ECS task definition file not found (%s)' % task_definition)
        raise SystemExit

    envs = environment.get_env()

    # Only forward a timeout when one was explicitly configured.
    timeout = get_workspace_value(workspace, 'deploy_timeout', '0')
    if timeout != '0':
        deploy_env['DEPLOY_TIMEOUT'] = timeout

    envs.update(deploy_env)

    container.create(
        image=ECS_DEPLOY_IMAGE,
        environment=envs,
        volumes=['./%s:/work/task-definition.tpl.json' % (task_definition)],
    )
def dnx_assume(aws_role, aws_account_id):
    """Assume an AWS role and persist the resulting credentials as secrets.

    Explicit arguments take precedence; missing values fall back to the
    dnx-assume plugin parameters in the config.
    """
    role = aws_role or get_config_value(
        'plugins.dnx-assume.parameters.aws_role')
    account = aws_account_id or get_config_value(
        'plugins.dnx-assume.parameters.aws_account_id')

    credentials = EnvironmentAws().build(
        aws_role=role,
        aws_account_id=account,
        aws_assume_role='true',
    ).get_env()

    create_secrets(credentials, CLI_ROOT + '/secrets')
def init(workspace, aws_role, check_modules):
    """Initialize terraform and create/select the target workspace.

    Optionally runs the module check afterwards; the `config.check_modules`
    config value overrides the *check_modules* argument.
    """
    envs = environment.build(workspace, aws_role).get_env()
    run_opts = dict(image=TERRAFORM_IMAGE, volumes=['.:/work'], environment=envs)

    container.create(command='init', **run_opts)
    # "workspace new" may fail if it already exists; "select" then picks it.
    container.create(command='workspace new %s' % (envs['WORKSPACE']), **run_opts)
    container.create(command='workspace "select" %s' % (envs['WORKSPACE']), **run_opts)

    if str2bool(get_config_value('config.check_modules', str(check_modules))):
        terraform_modules_check()
def kube_shell(cluster_name, workspace, aws_role, aws_assume_role, aws_default_region):
    """Open an interactive bash shell in the kube-tools container.

    Resolves missing arguments from the kube plugin config, refreshes the
    kubeconfig for the cluster, and exposes port 8001.
    """
    cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
    aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
    aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')

    envs = environment.build(
        workspace=workspace,
        aws_role=aws_role,
        aws_assume_role=aws_assume_role,
    ).get_env()
    envs['KUBECONFIG'] = (get_config_value('plugins.kube.parameters.kubeconfig', '')
                          or '/work/.kube-config')

    get_kube_config(aws_default_region, cluster_name, envs)

    container.create(
        image=KUBE_TOOLS_IMAGE,
        entrypoint='/bin/bash',
        ports=['8001:8001'],
        volumes=['.:/work'],
        environment=envs,
    )
def get_kube_config(aws_default_region, cluster_name, envs):
    """Refresh the kubeconfig for *cluster_name* via `aws eks update-kubeconfig`."""
    kubeconfig = (get_config_value('plugins.kube.parameters.kubeconfig', '')
                  or '/work/.kube-config')
    update_cmd = 'eks --region %s update-kubeconfig --name %s --kubeconfig %s' % (
        aws_default_region, cluster_name, kubeconfig)
    container.create(
        image=AWS_IMAGE,
        command=update_cmd,
        volumes=['.:/work'],
        environment=envs,
    )
def kube_proxy(cluster_name, workspace, aws_role, aws_assume_role, aws_default_region, port):
    """Run `kubectl proxy` inside the kube-tools container.

    *port* is a "host:container" mapping; the container-side port is passed
    to kubectl. Missing arguments are resolved from the kube plugin config.
    """
    cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
    aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
    aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')

    envs = environment.build(
        workspace=workspace,
        aws_role=aws_role,
        aws_assume_role=aws_assume_role,
    ).get_env()
    envs['KUBECONFIG'] = (get_config_value('plugins.kube.parameters.kubeconfig', '')
                          or '/work/.kube-config')

    get_kube_config(aws_default_region, cluster_name, envs)

    # Proxy listens on the container side of the "host:container" mapping.
    proxy_cmd = 'proxy --address 0.0.0.0 --port %s' % port.split(":")[1]
    container.create(
        image=KUBE_TOOLS_IMAGE,
        entrypoint='kubectl',
        command=proxy_cmd,
        ports=[port],
        volumes=['.:/work'],
        environment=envs,
    )
def helm(args, cluster_name, workspace, aws_role, aws_assume_role, aws_default_region):
    """Run helm with the given raw *args* inside the kube-tools container.

    Missing arguments are resolved from the kube plugin config and the
    kubeconfig is refreshed before helm runs.
    """
    cluster_name = cluster_name or get_config_value('plugins.kube.parameters.cluster_name')
    aws_default_region = aws_default_region or get_config_value('plugins.kube.parameters.aws_default_region')
    aws_assume_role = aws_assume_role or get_config_value('plugins.kube.parameters.aws_assume_role', 'false')

    envs = environment.build(
        workspace=workspace,
        aws_role=aws_role,
        aws_assume_role=aws_assume_role,
    ).get_env()
    envs['KUBECONFIG'] = (get_config_value('plugins.kube.parameters.kubeconfig', '')
                          or '/work/.kube-config')

    get_kube_config(aws_default_region, cluster_name, envs)

    # Each argument is followed by a single space (matches the legacy format).
    command = ''.join('%s ' % (arg) for arg in args)

    container.create(
        image=KUBE_TOOLS_IMAGE,
        command=command,
        entrypoint='helm',
        volumes=['.:/work'],
        environment=envs,
    )
def deploy_ecs(workspace, build_version):
    """Resolve deploy/registry strategies and deploy *build_version* to *workspace*."""
    deploy_type = get_workspace_value(workspace, 'type', 'ecs', True)
    registry_type = get_config_value('app.docker.registry_type', 'ecr')

    app_deploy = app_deploy_factory(deploy_type)
    app_registry = app_registry_factory(registry_type)

    environment.build(workspace)
    image_name = app_registry.get_image_name(build_version)
    click.echo('Deploying %s to %s' % (image_name, workspace))
    app_deploy.deploy(environment, workspace, image_name)
def docker_login(self, environment):
    """Log the local docker daemon in to this registry's ECR endpoint.

    Runs `aws ecr get-login` in the AWS CLI container to obtain a
    `docker login ...` command, then executes it locally. Exits the
    process when AWS credentials cannot be located.
    """
    aws_account_id = get_config_value('app.docker.registry_options.ecr_aws_account_id')
    aws_role = get_config_value('app.docker.registry_options.ecr_aws_role')
    aws_assume_role = get_config_value(
        'app.docker.registry_options.ecr_aws_assume_role', 'false').lower()

    envs = environment.build(
        aws_assume_role=aws_assume_role,
        aws_role=aws_role,
        aws_account_id=aws_account_id
    ).get_env()

    docker_get_login = container.create(
        image=AWS_IMAGE,
        command="ecr get-login --no-include-email --registry-ids %s --region %s" % (
            self.ecr_aws_account_id, self.ecr_aws_region
        ),
        environment=envs,
        tty=False
    )

    # The AWS CLI reports missing credentials on stdout, not via exit code.
    if ' '.join(docker_get_login.split()[:4]) == 'Unable to locate credentials.':
        click.echo(
            click.style('ERROR: ', fg='red') + docker_get_login
        )
        raise SystemExit

    # Execute the emitted `docker login ...` command; whitespace-split
    # already drops empty tokens, so no manual filtering is needed.
    subprocess.call(docker_get_login.split())

    click.echo("Docker login succeeded: %s.dkr.ecr.%s.amazonaws.com" %
               (self.ecr_aws_account_id, self.ecr_aws_region))
def deploy(self, environments):
    """Sync static assets to S3 and optionally invalidate CloudFront.

    Bucket, distribution id and source dir come from the global config,
    falling back to the workspace section. Exits the process when the
    source directory does not exist.
    """
    workspace = environments.get('WORKSPACE', 'default')
    environments.update({
        'AWS_DEFAULT_REGION': get_workspace_value(workspace, 'aws.region', '', True),
    })

    # Global config wins; workspace-level values are the fallback.
    s3_bucket_name = (get_config_value('app.s3_bucket', '')
                      or get_workspace_value(workspace, 'app.s3_bucket', '', True))
    distribution_id = (get_config_value('app.distribution_id', '')
                       or get_workspace_value(workspace, 'app.distribution_id', '', True))
    src_dir = (get_config_value('app.src', '')
               or get_workspace_value(workspace, 'app.src', '', True))

    if not os.path.isdir(src_dir):
        click.echo('Source folder not found (%s)' % src_dir)
        raise SystemExit

    sync_cmd = ('s3 sync %s s3://%s --delete --cache-control '
                'max-age=31536000 --acl public-read') % (src_dir, s3_bucket_name)
    container.create(command=sync_cmd, image=AWS_IMAGE,
                     environment=environments, volumes=['.:/work'])

    # Invalidation is optional: only when a distribution id is configured.
    if distribution_id:
        invalidate_cmd = ('cloudfront create-invalidation --distribution-id %s --paths "/*"'
                          % (distribution_id))
        container.create(command=invalidate_cmd, image=AWS_IMAGE,
                         environment=environments, volumes=['.:/work'])
def load_plugins():
    """Install, import and initialize every plugin declared in the config.

    Installed plugin packages (name containing 'one-cli-plugin-') that are
    no longer declared in the config are collected and removed at the end.
    Missing 'plugins' section is not an error; a missing config file is
    reported but does not abort.
    """
    installed_packages = get_installed_packages()
    # Assume every installed plugin package is garbage until the config
    # proves it is still referenced.
    garbage_packages = [
        pkg for pkg in installed_packages if 'one-cli-plugin-' in pkg
    ]
    try:
        with open(CONFIG_FILE) as file:
            docs = yaml.load(file, Loader=yaml.BaseLoader)
            # Only the keys are needed; values are read via get_config_value.
            for key in docs['plugins']:
                package = get_config_value('plugins.' + key + '.package')
                module = get_config_value('plugins.' + key + '.module')

                if package not in installed_packages:
                    click.echo('Installing plugin %s.' % package)
                    install(package)
                    click.echo('Plugin %s successfully installed.\n' % package)
                elif package in garbage_packages:
                    # Still declared in the config: keep it installed.
                    # (Guarded: packages without the plugin prefix are not
                    # in the garbage list and must not raise ValueError.)
                    garbage_packages.remove(package)

                getattr(importlib.import_module(module), '__init__')()
            # NOTE: the 'with' block closes the file; no explicit close needed.
    except KeyError:
        # No 'plugins' section in the config: nothing to load.
        pass
    except AttributeError:
        click.echo(
            click.style('ERROR ', fg='red') + 'Plugin attribute error.\n')
        raise SystemExit
    except FileNotFoundError:
        click.echo(
            click.style('ERROR ', fg='red') +
            'Config file %s not found.\n' % CONFIG_FILE)
    except Exception:
        click.echo(click.style('ERROR ', fg='red') + 'Unexpected error.\n')
        raise SystemExit

    cleanup(garbage_packages)
def get_images(self):
    """Return the tool image map, honoring `images.<key>` config overrides."""
    defaults = {
        'terraform': TERRAFORM_IMAGE,
        'gsuite': GSUITE_AUTH_IMAGE,
        'azure': AZURE_AUTH_IMAGE,
        'okta': OKTA_AUTH_IMAGE,
        'aws': AWS_IMAGE,
        'aws_v2': AWS_V2_IMAGE,
        'shell': SHELL_IMAGE,
        'ecs_deploy': ECS_DEPLOY_IMAGE,
    }
    # Each default can be overridden by an `images.<key>` config entry.
    return {
        key: get_config_value('images.' + key, default)
        for key, default in defaults.items()
    }
def docker_push(build_version):
    """Push the image for *build_version* via the configured registry type."""
    registry_type = get_config_value('app.docker.registry_type', 'ecr')
    app_registry_factory(registry_type).docker_push(build_version)
def docker_login():
    """Perform a docker login against the configured registry type."""
    registry_type = get_config_value('app.docker.registry_type', 'ecr')
    app_registry_factory(registry_type).docker_login(environment)
def __init__(self):
    """Read the application's docker build settings from the config."""
    self.name = get_config_value('app.name')
    # Image name defaults to the application name when not configured.
    self.image_name = get_config_value(
        'app.docker.image_name', get_config_value('app.name'))
    self.dockerfile = get_config_value('app.docker.file', 'Dockerfile')
    # Extra arguments appended to the `docker build` command line.
    self.build_cmd_args = get_config_value('app.docker.build_cmd_args', '')
def __init__(self):
    """Extend the base registry with ECR region and account settings."""
    super().__init__()
    self.name = get_config_value('app.name')
    # ECR endpoint coordinates come from the registry options section.
    self.ecr_aws_region = get_config_value(
        'app.docker.registry_options.ecr_aws_region')
    self.ecr_aws_account_id = get_config_value(
        'app.docker.registry_options.ecr_aws_account_id')