class GoogleContainerRegistry(ContainerRegistry):
    alias = 'gcr'
    name = 'Google container registry'

    def __init__(self, runner, cluster_name, cluster_zone, **_):
        self.runner = Runner(
            cmd.GCLOUD_CONTAINER_CLUSTER_GET_CREDENTIALS.format(
                cluster=cluster_name, zone=cluster_zone),
            runner=runner)

    def push(self, image):
        self.runner.start(cmd.GCLOUD_DOCKER_PUSH.format(image))
class AwsContainerRegistry(ContainerRegistry):
    alias = 'ecr'
    name = 'Amazon EC2 Container Registry'

    def __init__(self, runner, cluster_name, cluster_zone, kops_state_store, **_):
        self.runner = Runner(
            cmd.AWS_ECR_GET_CREDENTIALS.format(region=cluster_zone),
            cmd.AWS_KOPS_EXPORT_CONFIG.format(cluster=cluster_name, store=kops_state_store),
            runner=runner)

    def push(self, image):
        self.runner.start(cmd.AWS_DOCKER_PUSH.format(image))
# Constructor fragment, presumably for the local (Minikube) registry variant;
# the enclosing class definition is not part of this excerpt.
def __init__(self, runner, **_):
    self.runner = Runner(cmd.KUBECTL_CONFIG_MINIKUBE, runner=runner)
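# Illustrative sketch only (not part of the original module): registries.load,
# called by BgKube below, presumably picks a ContainerRegistry subclass by its
# configured alias ('gcr', 'ecr', 'local', ...). The function name and the exact
# dispatch mechanism here are assumptions.
def _load_registry_sketch(runner, options):
    # Map each registered alias to its class, e.g. {'gcr': GoogleContainerRegistry, ...}.
    available = {cls.alias: cls for cls in ContainerRegistry.__subclasses__()}
    registry_cls = available[options.container_registry]
    # The registry constructors accept **_ so unused options are ignored.
    return registry_cls(runner, **vars(options))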
class BgKube(object):
    def __init__(self, options):
        self.load_options(options)
        self.kube_api = KubeApi()
        self.runner = Runner(self.get_docker_daemon())
        self.registry = registries.load(self.runner, options)

    @property
    def is_minikube(self):
        return self.container_registry == 'local'

    def get_docker_daemon(self):
        return cmd.MINIKUBE_DOCKER_ENV if self.is_minikube else cmd.DOCKERMACHINE_ENV.format(
            self.docker_machine_name)

    def load_options(self, options):
        # Required options must be present; optional ones fall back to class-level defaults.
        for opt in self.required:
            setattr(self, opt, require(options, opt))

        for opt in self.optional:
            setattr(self, opt, getattr(options, opt, None) or getattr(self, opt))

    @log('Building image {image_name} using {dockerfile}...')
    def build(self):
        # Tag the image with a timestamp so every build is uniquely addressable.
        tag = timestamp()
        command = [
            cmd.DOCKER_BUILD.format(
                context=self.context,
                dockerfile=self.dockerfile,
                image=self.image_name,
                tag=tag,
            )
        ]

        if self.docker_build_args:
            command.append(' '.join(
                '--build-arg {}'.format(b) for b in self.docker_build_args.split(' ')))

        self.runner.start(' '.join(command))
        return tag

    @log('Pushing image {image_name}:{tag} to {registry}...')
    def push(self, tag):
        self.registry.push('{}:{}'.format(self.image_name, tag))

    @log('Applying {_} using config: {filename}...')
    def apply(self, _, filename, tag=None, color=''):
        return self.kube_api.apply(filename, self.env_file,
                                   TAG=tag, COLOR=color, ENV_FILE=self.env_file)

    def pod_find(self, tag):
        results = [pod for pod in self.kube_api.pods(tag=tag) if pod.ready]
        return results[0] if results else None

    def pod_exec(self, tag, command, *args):
        pod = self.pod_find(tag).name
        return self.runner.start(
            cmd.KUBECTL_EXEC.format(pod=pod, command=command, args=' '.join(args)),
            capture=True)

    def migrate_initial(self, tag):
        # First deployment: seed the database via a one-off migration Job and wait
        # for all of its completions to succeed.
        if self.db_migrations_job_config_seed:
            def job_completions_extractor(job):
                completions = job.obj['spec']['completions']
                succeeded_completions = job.obj['status']['succeeded']
                return completions if succeeded_completions == completions else None

            applied_objects = self.apply('db migration', self.db_migrations_job_config_seed, tag=tag)
            self.wait_for_resource_running('Job', 'completions', job_completions_extractor,
                                           self.db_migrations_job_timeout, *applied_objects)

    def migrate_apply(self, tag):
        # Record the current migration state before applying, so it can be rolled back.
        previous_state = None

        if self.db_migrations_status_command:
            previous_state = self.pod_exec(tag, self.db_migrations_status_command)

        if self.db_migrations_apply_command:
            self.pod_exec(tag, self.db_migrations_apply_command)

        return previous_state

    def migrate_rollback(self, tag, previous_state):
        if self.db_migrations_rollback_command:
            self.pod_exec(tag, self.db_migrations_rollback_command, previous_state)

    def migrate(self, tag):
        db_migrations_previous_state = None
        is_initial = self.active_env() is None

        if is_initial:
            self.migrate_initial(tag)
        else:
            db_migrations_previous_state = self.migrate_apply(tag)

        return is_initial, db_migrations_previous_state

    def active_env(self):
        # The live color is whatever the public Service's selector currently points at.
        service = self.kube_api.resource_by_name('Service', self.service_name)
        return None if not service else service.obj['spec']['selector'].get('color', None)

    def other_env(self):
        return {'blue': 'green', 'green': 'blue'}.get(self.active_env(), None)

    def deploy(self, tag):
        # Deploy the new image to the idle color and wait until its replicas are ready.
        color = self.other_env() or 'blue'
        applied_objects = self.apply('deployment', self.deployment_config, tag=tag, color=color)
        self.wait_for_resource_running(
            'Deployment', 'replicas',
            lambda deployment: deployment.replicas if deployment.ready and self.pod_find(tag) else None,
            self.deployment_timeout, *applied_objects)
        return color

    @log('Waiting for {resource_type} {prop} to become available')
    def wait_for_resource_running(self, resource_type, prop, prop_extractor,
                                  timeout_seconds, *object_names):
        # Poll each named resource once per second until prop_extractor returns a
        # value or the timeout elapses.
        def try_extract_value(resource_name):
            try:
                result = self.kube_api.resource_by_name(resource_type, resource_name)
                return prop_extractor(result or {})
            except (IndexError, KeyError, AttributeError):
                return None

        def extract_value_with_timeout(resource_name):
            value = None

            if timeout_seconds:
                attempts = 0

                while not value and attempts < timeout_seconds:
                    sleep(1)
                    attempts += 1
                    output('.', '', flush=True)
                    value = try_extract_value(resource_name)
            else:
                value = try_extract_value(resource_name)

            if value:
                output('\n{} {} {} is: {}'.format(resource_type, resource_name, prop, value))
            elif timeout_seconds:
                raise ActionFailedError(
                    '\nFailed after {} seconds elapsed. '
                    'For more info try running: $ kubectl describe {} {}'.format(
                        timeout_seconds, resource_type, resource_name))

            return value

        values = [extract_value_with_timeout(name) for name in object_names]
        return values

    @log('Running smoke tests on {color} deployment...')
    def smoke_test(self, color):
        # Expose the new color through a temporary smoke-test Service and run the
        # configured test command against it. No smoke config means an automatic pass.
        if self.smoke_service_config:
            def service_host_extractor(service):
                if self.is_minikube:
                    service_address = self.runner.start(
                        cmd.MINIKUBE_SERVICE_URL.format(service.name), capture=True)
                else:
                    service_address = get_loadbalancer_address(service)

                return service_address if is_host_up(service_address) else None

            applied_objects = self.apply('smoke service', self.smoke_service_config, color=color)
            smoke_service_address = ','.join(
                self.wait_for_resource_running('Service', 'host', service_host_extractor,
                                               self.smoke_service_timeout, *applied_objects))
            return_code = self.runner.start(self.smoke_tests_command,
                                            TEST_HOST=smoke_service_address, silent=True)
            return return_code == 0

        return True

    @log('Promoting {color} deployment...')
    def swap(self, color):
        # Point the public Service at the given color and wait for it to be ready.
        self.apply('public service', self.service_config, color=color)
        self.wait_for_resource_running(
            'Service', 'status',
            lambda service: 'ready' if service.exists(ensure=True) else None,
            self.service_timeout, self.service_name)

    @log('Publishing...')
    def publish(self):
        # Full blue-green release: build and push the image, deploy it to the idle
        # color, run migrations, smoke test, then swap traffic (or roll migrations back).
        next_tag = self.build()
        self.push(next_tag)
        next_color = self.deploy(next_tag)
        is_initial, db_migrations_previous_state = self.migrate(next_tag)
        health_ok = self.smoke_test(next_color)

        if health_ok:
            self.swap(next_color)
        else:
            if not is_initial:
                self.migrate_rollback(next_tag, db_migrations_previous_state)

            raise ActionFailedError(
                'Cannot promote {} deployment because smoke tests failed'.format(next_color))

        output('Done.')

    @log('Rolling back to previous deployment...')
    def rollback(self):
        color = self.other_env()

        if color:
            self.swap(color)
        else:
            raise ActionFailedError(
                'Cannot rollback to a previous environment because one does not exist.')

        output('Done.')
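# Illustrative usage sketch (an assumption, not taken from the original source):
# BgKube is driven by an options object whose attribute names mirror the fields
# read by the class above (image_name, dockerfile, context, service_name,
# deployment_config, ...). The values below are hypothetical; the real option
# names, defaults, and required/optional lists live in the project's CLI layer.
if __name__ == '__main__':
    from argparse import Namespace

    options = Namespace(
        image_name='gcr.io/my-project/my-app',   # hypothetical image
        dockerfile='Dockerfile',
        context='.',
        container_registry='gcr',
        cluster_name='my-cluster',
        cluster_zone='us-central1-a',
        service_name='my-app',
        service_config='service.yml',
        deployment_config='deployment.yml',
        env_file='.env',
    )

    bg = BgKube(options)
    bg.publish()     # build, push, deploy to the idle color, migrate, smoke test, swap
    # bg.rollback()  # point the public Service back at the previous color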