def logs(context, timestamps, follow, tail, namespace=None):
    """Tail logs from every container of the project's running application pods.

    Builds one `kubectl logs` command per container and runs them concurrently.
    """
    kctl = Kubectl(context, namespace=namespace)
    # Assemble the kubectl log flags requested by the caller.
    flags = []
    if timestamps:
        flags.append(' --timestamps')
    if follow:
        flags.append(' --follow')
    if tail:
        flags.append(" --tail=%s" % tail)
    opts = ''.join(flags)
    pods = kctl.get_objects('pod', selector="app=%s,layer=application" % config.project_name)
    running_pods = [pod for pod in pods if pod['status']['phase'] == 'Running']
    # One log command per (pod, container) pair, in pod order.
    commands = []
    for pod in running_pods:
        pod_name = pod['metadata']['name']
        for container in pod['spec']['containers']:
            commands.append(kctl.command("logs %s %s%s" % (pod_name, container['name'], opts)))
    return shout_concurrent(commands, print_output=True)
def k8s_status(context, resources, pods, describe, top, namespace=None, yaml_file_name=None): if yaml_file_name is None: yaml_file_name = context kubernetes_yml = os.path.join(os.getcwd(), "hokusai/%s.yml" % yaml_file_name) if not os.path.isfile(kubernetes_yml): raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml) kctl = Kubectl(context, namespace=namespace) if describe: kctl_cmd = "describe" output = "" else: kctl_cmd = "get" output = " -o wide" if resources: print('') print_green("Resources") print_green("===========") shout(kctl.command("%s -f %s%s" % (kctl_cmd, kubernetes_yml, output)), print_output=True) if pods: print('') print_green("Pods") print_green("===========") shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (kctl_cmd, config.project_name, output)), print_output=True) if top: print('') print_green("Top Pods") print_green("===========") shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def k8s_status(context, resources, pods, describe, top, namespace=None, filename=None):
    """Show the status of the project's Kubernetes resources and pods.

    Raises:
        HokusaiError: if the resolved yaml spec file does not exist.
    """
    # Default to the canonical per-context spec when no file is given.
    spec_path = filename if filename is not None else os.path.join(CWD, HOKUSAI_CONFIG_DIR, "%s.yml" % context)
    if not os.path.isfile(spec_path):
        raise HokusaiError("Yaml file %s does not exist." % spec_path)
    kctl = Kubectl(context, namespace=namespace)
    # `describe` gives verbose per-object output; `get -o wide` a summary table.
    kctl_cmd, output = ("describe", "") if describe else ("get", " -o wide")
    if resources:
        print_green("Resources", newline_before=True)
        print_green("===========")
        shout(kctl.command("%s -f %s%s" % (kctl_cmd, spec_path, output)), print_output=True)
    if pods:
        print_green("Pods", newline_before=True)
        print_green("===========")
        shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (kctl_cmd, config.project_name, output)), print_output=True)
    if top:
        print_green("Top Pods", newline_before=True)
        print_green("===========")
        shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def k8s_status(context, resources, pods, describe, top, namespace=None, filename=None):
    """Show the status of the project's Kubernetes resources and pods.

    Resolves the context's template (or an explicit file), renders it, and
    queries the cluster against the rendered spec.
    """
    if filename is None:
        yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
    else:
        yaml_template = TemplateSelector().get(filename)
    kctl = Kubectl(context, namespace=namespace)
    rendered_spec = YamlSpec(yaml_template).to_file()
    # `describe` gives verbose per-object output; `get -o wide` a summary table.
    kctl_cmd, output = ("describe", "") if describe else ("get", " -o wide")
    if resources:
        print_green("Resources", newline_before=True)
        print_green("===========")
        shout(kctl.command("%s -f %s%s" % (kctl_cmd, rendered_spec, output)), print_output=True)
    if pods:
        print_green("Pods", newline_before=True)
        print_green("===========")
        shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (kctl_cmd, config.project_name, output)), print_output=True)
    if top:
        print_green("Top Pods", newline_before=True)
        print_green("===========")
        shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def environment_create(context):
    """Create the remote Kubernetes environment for `context` from its yaml spec.

    Raises:
        HokusaiError: if the spec file, ECR repository, or 'latest' tag is missing.
    """
    spec_path = os.path.join(os.getcwd(), "hokusai/%s.yml" % context)
    if not os.path.isfile(spec_path):
        raise HokusaiError("Yaml file %s does not exist for given context." % spec_path)
    ecr = ECR()
    if not ecr.project_repository_exists():
        raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
    if not ecr.tag_exists('latest'):
        raise HokusaiError("Image tag 'latest' does not exist... did you run `hokusai push`?")
    # Seed the context tag from 'latest' only when it isn't already present.
    if not ecr.tag_exists(context):
        ecr.retag('latest', context)
        print_green("Updated tag 'latest' -> %s" % context)
    kctl = Kubectl(context)
    shout(kctl.command("create --save-config -f %s" % spec_path), print_output=True)
    print_green("Created remote environment %s" % context)
def check():
    """Verify the local hokusai setup and report each check's result.

    Prints one pass/fail line per check and returns the number of failed
    checks (0 when the environment is fully configured).
    """
    return_code = 0

    def check_ok(check_item):
        # Green check mark for a passing check.
        print_green('\u2714 ' + check_item + ' found')

    def check_err(check_item):
        # Red cross for a failing check.
        print_red('\u2718 ' + check_item + ' not found')

    # NOTE(review): unlike every check below, a missing project-name does not
    # increment return_code -- confirm this is intentional.
    try:
        config.project_name
        check_ok('Config project-name')
    except HokusaiError:
        check_err('Config project-name')

    # Required CLI tools, probed via `which`.
    try:
        shout('which docker')
        check_ok('docker')
    except CalledProcessError:
        check_err('docker')
        return_code += 1
    try:
        shout('which docker-compose')
        check_ok('docker-compose')
    except CalledProcessError:
        check_err('docker-compose')
        return_code += 1
    try:
        shout('which kubectl')
        check_ok('kubectl')
    except CalledProcessError:
        check_err('kubectl')
        return_code += 1
    try:
        shout('which git')
        check_ok('git')
    except CalledProcessError:
        check_err('git')
        return_code += 1

    # AWS credentials are considered valid if STS can identify the caller.
    try:
        boto3.client('sts', region_name=get_region_name()).get_caller_identity()
        check_ok('Valid AWS credentials')
    except (botoexceptions.ClientError, botoexceptions.NoCredentialsError):
        check_err('Valid AWS credentials')
        return_code += 1

    ecr = ECR()
    if ecr.project_repo_exists():
        check_ok("ECR repository '%s'" % config.project_name)
    else:
        check_err("ECR repository '%s'" % config.project_name)
        return_code += 1

    # Project template files (whatever extension TemplateSelector accepts).
    try:
        build_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, BUILD_YAML_FILE))
        check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(build_template)[-1])
    except HokusaiError:
        check_err('hokusai/build.*')
        return_code += 1
    try:
        development_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
        check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(development_template)[-1])
    except HokusaiError:
        check_err('hokusai/development.*')
        return_code += 1
    try:
        test_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, TEST_YML_FILE))
        check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(test_template)[-1])
    except HokusaiError:
        check_err('hokusai/test.*')
        return_code += 1

    for context in ['staging', 'production']:
        try:
            # Kubectl('staging').contexts() lists every context in the
            # kubeconfig, so one client serves to check both contexts.
            if context in Kubectl('staging').contexts():
                check_ok("kubectl context '%s'" % context)
            else:
                check_err("kubectl context '%s'" % context)
                return_code += 1
        except CalledProcessError:
            check_err('%s context' % context)
            return_code += 1
        try:
            context_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
            check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(context_template)[-1])
        except HokusaiError:
            check_err("hokusai/%s.*" % context)
            return_code += 1
    return return_code
def check():
    """Verify the local hokusai setup and report each check's result.

    Prints one pass/fail line per check and returns the number of failed
    checks (0 when the environment is fully configured).
    """
    return_code = 0

    def check_ok(check_item):
        # Green check mark for a passing check.
        print_green(u'\u2714 ' + check_item + ' found')

    def check_err(check_item):
        # Red cross for a failing check.
        print_red(u'\u2718 ' + check_item + ' not found')

    config.check()

    try:
        config.project_name
        check_ok('Config project-name')
    except HokusaiError:
        check_err('Config project-name')

    # Required CLI tools, probed by running their version commands.
    try:
        shout('docker --version')
        check_ok('docker')
    except CalledProcessError:
        check_err('docker')
        return_code += 1
    try:
        shout('docker-compose --version')
        check_ok('docker-compose')
    except CalledProcessError:
        check_err('docker-compose')
        return_code += 1
    try:
        shout('kubectl version')
        check_ok('kubectl')
    except CalledProcessError:
        check_err('kubectl')
        return_code += 1
    try:
        shout('git version')
        check_ok('git')
    except CalledProcessError:
        check_err('git')
        return_code += 1

    # AWS credentials must be provided via the environment.
    if os.environ.get('AWS_ACCESS_KEY_ID') is not None:
        check_ok('$AWS_ACCESS_KEY_ID')
    else:
        check_err('$AWS_ACCESS_KEY_ID')
        return_code += 1
    if os.environ.get('AWS_SECRET_ACCESS_KEY') is not None:
        check_ok('$AWS_SECRET_ACCESS_KEY')
    else:
        check_err('$AWS_SECRET_ACCESS_KEY')
        return_code += 1

    ecr = ECR()
    if ecr.project_repo_exists():
        check_ok("ECR repository '%s'" % config.project_name)
    else:
        check_err("ECR repository '%s'" % config.project_name)
        return_code += 1

    # Either hokusai/build.yml or the legacy hokusai/common.yml must exist.
    # fix: only count a failure when neither file exists -- the original
    # increment was misplaced and could count a successful check as a failure.
    if os.path.isfile(os.path.join(os.getcwd(), 'hokusai/build.yml')):
        check_ok('./hokusai/build.yml')
    elif os.path.isfile(os.path.join(os.getcwd(), 'hokusai/common.yml')):
        check_ok('./hokusai/common.yml')
    else:
        check_err('./hokusai/build.yml')
        return_code += 1

    if os.path.isfile(os.path.join(os.getcwd(), 'hokusai/development.yml')):
        check_ok('./hokusai/development.yml')
    else:
        check_err('./hokusai/development.yml')
        return_code += 1
    if os.path.isfile(os.path.join(os.getcwd(), 'hokusai/test.yml')):
        check_ok('./hokusai/test.yml')
    else:
        check_err('./hokusai/test.yml')
        return_code += 1

    for context in ['staging', 'production']:
        # Kubectl('staging').contexts() lists every context in the
        # kubeconfig, so one client serves to check both contexts.
        if context in Kubectl('staging').contexts():
            check_ok("kubectl context '%s'" % context)
        else:
            check_err("kubectl context '%s'" % context)
            return_code += 1
        if os.path.isfile(os.path.join(os.getcwd(), "hokusai/%s.yml" % context)):
            check_ok("./hokusai/%s.yml" % context)
        else:
            check_err("./hokusai/%s.yml" % context)
            return_code += 1
    return return_code
def __init__(self, context, namespace=None):
    """Bind a kubectl client and an ECR client to the given context/namespace."""
    self.context = context
    self.kctl = Kubectl(self.context, namespace=namespace)
    self.ecr = ECR()
class CommandRunner:
    """Runs a one-off command in a throwaway pod built from the project image."""

    def __init__(self, context, namespace=None):
        # Bind kubectl and ECR clients to the target context/namespace.
        self.context = context
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()

    def run(self, tag_or_digest, cmd, tty=None, env=(), constraint=()):
        """Launch `cmd` in a one-off pod and return its exit status.

        Args:
            tag_or_digest: image tag, or an image digest when it contains ':'
                (e.g. 'sha256:...').
            cmd: shell command string, split on single spaces into args.
            tty: attach an interactive TTY; defaults to config.run_tty when None.
            env: iterable of 'KEY=VALUE' environment variable strings.
            constraint: iterable of 'key=value' node selector labels; defaults
                to config.run_constraints when empty.

        Raises:
            HokusaiError: when an env var or node selector is malformed.
        """
        if os.environ.get('USER') is not None:
            # The regex used for the validation of name is '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
            user = re.sub("[^0-9a-z]+", "-", os.environ.get('USER').lower())
            uuid = "%s-%s" % (user, k8s_uuid())
        else:
            uuid = k8s_uuid()
        name = "%s-hokusai-run-%s" % (config.project_name, uuid)
        # Digests (containing ':') are referenced with '@'; tags with ':'.
        separator = "@" if ":" in tag_or_digest else ":"
        image_name = "%s%s%s" % (self.ecr.project_repo, separator, tag_or_digest)
        container = {
            "args": cmd.split(' '),
            "name": name,
            "image": image_name,
            "imagePullPolicy": "Always",
            'envFrom': [{
                'configMapRef': {
                    'name': "%s-environment" % config.project_name
                }
            }]
        }
        run_tty = tty if tty is not None else config.run_tty
        if run_tty:
            container.update({"stdin": True, "stdinOnce": True, "tty": True})
        if env:
            container['env'] = []
            for s in env:
                if '=' not in s:
                    raise HokusaiError(
                        "Error: environment variables must be of the form 'KEY=VALUE'"
                    )
                split = s.split('=', 1)
                container['env'].append({'name': split[0], 'value': split[1]})
        spec = {"containers": [container]}
        constraints = constraint or config.run_constraints
        if constraints:
            spec['nodeSelector'] = {}
            for label in constraints:
                if '=' not in label:
                    raise HokusaiError(
                        "Error: Node selectors must of the form 'key=value'")
                split = label.split('=', 1)
                spec['nodeSelector'][split[0]] = split[1]
        overrides = {"apiVersion": "v1", "spec": spec}
        if run_tty:
            # Interactive: stream output directly; --rm cleans the pod up.
            shout(self.kctl.command(
                "run %s -t -i --image=%s --restart=Never --overrides=%s --rm"
                % (name, image_name, pipes.quote(json.dumps(overrides)))),
                print_output=True)
        else:
            # Non-interactive: attach and surface the pod's exit status.
            return returncode(
                self.kctl.command(
                    "run %s --attach --image=%s --overrides=%s --restart=Never --rm"
                    % (name, image_name, pipes.quote(json.dumps(overrides)))))
class Deployment(object):
    """Manages rollout of the project's Kubernetes Deployments for a context."""

    def __init__(self, context, deployment_name=None, namespace=None):
        # Cache either the single named deployment or every deployment
        # labeled as this project's application layer.
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            self.cache = [
                self.kctl.get_object("deployment %s" % deployment_name)
            ]
        else:
            self.cache = self.kctl.get_objects(
                'deployment',
                selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, timeout, update_config=False, filename=None):
        """Roll out the image tagged `tag` (resolved to a digest) to the
        cached deployments, running pre/post-deploy hooks where applicable.

        Raises:
            HokusaiError: if the repo/digest is missing, the pre-deploy hook
                fails, any rollout fails, or any post-deploy step fails.
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        digest = self.ecr.image_digest_for_tag(tag)
        if digest is None:
            raise HokusaiError(
                "Could not find an image digest for tag %s. Aborting." % tag)
        if self.namespace is None:
            print_green("Deploying %s to %s..." % (digest, self.context),
                        newline_after=True)
        else:
            print_green("Deploying %s to %s/%s..." %
                        (digest, self.context, self.namespace),
                        newline_after=True)
        """
        This logic should be refactored, but essentially if namespace and
        filename are provided, the caller is a review app, while if namespace
        is None it is either staging or production. If filename is unset for
        staging or production it is targeting the 'canonical' app, i.e.
        staging.yml or production.yml while if it is set it is targeting a
        'canary' app.

        For the canonical app, run deploy hooks and post-deploy steps creating
        deployment tags

        For a canary app, skip deploy hooks and post-deploy steps

        For review apps, run deploy hooks but skip post-deploy steps

        For all deployment rollouts, if update_config or filename targets a
        yml file, bust the deployment cache using k8s field selectors and get
        deployments to watch the rollout from the yml file spec
        """
        # Run the pre-deploy hook for the canonical app or a review app
        if config.pre_deploy and (filename is None or (filename and self.namespace)):
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy,
                        newline_after=True)
            return_code = CommandRunner(self.context, namespace=self.namespace).run(
                tag, config.pre_deploy, constraint=constraint, tty=False)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

        # Patch the deployments
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        if filename is None:
            kubernetes_yml = os.path.join(CWD, HOKUSAI_CONFIG_DIR,
                                          "%s.yml" % self.context)
        else:
            kubernetes_yml = filename

        # If a review app, a canary app or the canonical app while updating config,
        # bust the deployment cache and populate deployments from the yaml file
        if filename or update_config:
            self.cache = []
            for item in yaml.safe_load_all(open(kubernetes_yml, 'r')):
                if item['kind'] == 'Deployment':
                    self.cache.append(item)

        # If updating config, patch the spec and apply
        if update_config:
            print_green("Patching Deployments in spec %s with image digest %s"
                        % (kubernetes_yml, digest), newline_after=True)
            payload = []
            for item in yaml.safe_load_all(open(kubernetes_yml, 'r')):
                if item['kind'] == 'Deployment':
                    item['spec']['template']['metadata']['labels'][
                        'deploymentTimestamp'] = deployment_timestamp
                    item['spec']['progressDeadlineSeconds'] = timeout
                    # Repoint only containers that run this project's image.
                    for container in item['spec']['template']['spec'][
                            'containers']:
                        if self.ecr.project_repo in container['image']:
                            container['image'] = "%s@%s" % (
                                self.ecr.project_repo, digest)
                payload.append(item)
            # Write the patched spec to a temp file and `kubectl apply` it;
            # the temp file is removed even when apply fails.
            f = NamedTemporaryFile(delete=False)
            f.write(YAML_HEADER)
            f.write(yaml.safe_dump_all(payload, default_flow_style=False))
            f.close()
            print_green("Applying patched spec %s..." % f.name,
                        newline_after=True)
            try:
                shout(self.kctl.command("apply -f %s" % f.name),
                      print_output=True)
            finally:
                os.unlink(f.name)

        # If not updating config, patch the deployments in the cache and call kubectl patch to update
        else:
            for deployment in self.cache:
                containers = [(container['name'], container['image'])
                              for container in deployment['spec']['template']
                              ['spec']['containers']]
                # Only patch containers that run this project's image.
                deployment_targets = [{
                    "name": name,
                    "image": "%s@%s" % (self.ecr.project_repo, digest)
                } for name, image in containers
                                      if self.ecr.project_repo in image]
                patch = {
                    "spec": {
                        "template": {
                            "metadata": {
                                "labels": {
                                    "deploymentTimestamp": deployment_timestamp
                                }
                            },
                            "spec": {
                                "containers": deployment_targets
                            }
                        },
                        "progressDeadlineSeconds": timeout
                    }
                }
                print_green("Patching deployment %s..." %
                            deployment['metadata']['name'],
                            newline_after=True)
                shout(
                    self.kctl.command(
                        "patch deployment %s -p '%s'" %
                        (deployment['metadata']['name'], json.dumps(patch))))

        # Watch the rollouts in the cache and if any fail, roll back
        print_green("Waiting for deployment rollouts to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            print_red(
                "One or more deployment rollouts failed! Rolling back...",
                newline_before=True, newline_after=True)
            rollback_commands = [
                self.kctl.command("rollout undo deployment/%s" %
                                  deployment['metadata']['name'])
                for deployment in self.cache
            ]
            shout_concurrent(rollback_commands, print_output=True)
            raise HokusaiError("Deployment failed!")

        post_deploy_success = True

        # Run the post-deploy hook for the canonical app or a review app
        if config.post_deploy and (filename is None or (filename and self.namespace)):
            print_green("Running post-deploy hook '%s'..." % config.post_deploy,
                        newline_after=True)
            return_code = CommandRunner(self.context, namespace=self.namespace).run(
                tag, config.post_deploy, constraint=constraint, tty=False)
            if return_code:
                # A failed post-deploy hook does not roll anything back; it
                # only marks the deploy as partially failed.
                print_yellow(
                    "WARNING: Running the post-deploy hook failed with return code %s"
                    % return_code, newline_before=True, newline_after=True)
                print_yellow(
                    "The image digest %s has been rolled out. However, you should run the post-deploy hook '%s' manually, or re-run this deployment."
                    % (digest, config.post_deploy), newline_after=True)
                post_deploy_success = False

        # For the canonical app, create tags
        if filename is None:
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            print_green("Updating ECR deployment tags in %s..." %
                        self.ecr.project_repo, newline_after=True)
            try:
                self.ecr.retag(tag, self.context)
                print_green("Updated ECR tag %s -> %s" % (tag, self.context))
                self.ecr.retag(tag, deployment_tag)
                print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag),
                            newline_after=True)
            except (ValueError, ClientError) as e:
                print_yellow(
                    "WARNING: Updating ECR deployment tags failed due to the error: '%s'"
                    % str(e), newline_before=True, newline_after=True)
                print_yellow(
                    "The tag %s has been rolled out. However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment."
                    % (tag, deployment_tag, self.context), newline_after=True)
                post_deploy_success = False

            remote = git_remote or config.git_remote
            if remote:
                print_green("Pushing Git deployment tags to %s..." % remote,
                            newline_after=True)
                try:
                    shout("git fetch %s" % remote)
                    shout("git tag -f %s %s" % (self.context, tag),
                          print_output=True)
                    shout("git tag -f %s %s" % (deployment_tag, tag),
                          print_output=True)
                    shout("git push -f --no-verify %s refs/tags/%s" %
                          (remote, self.context), print_output=True)
                    print_green("Updated Git tag %s -> %s" % (tag, self.context))
                    shout("git push -f --no-verify %s refs/tags/%s" %
                          (remote, deployment_tag), print_output=True)
                    print_green("Updated Git tag %s -> %s" %
                                (tag, deployment_tag), newline_after=True)
                except CalledProcessError as e:
                    print_yellow(
                        "WARNING: Creating Git deployment tags failed due to the error: '%s'"
                        % str(e), newline_before=True, newline_after=True)
                    print_yellow(
                        "The tag %s has been rolled out. However, you should create the Git tags '%s' and '%s' manually, or re-run this deployment."
                        % (tag, deployment_tag, self.context),
                        newline_after=True)
                    post_deploy_success = False

        if post_deploy_success:
            print_green("Deployment succeeded!")
        else:
            raise HokusaiError("One or more post-deploy steps failed!")

    def refresh(self):
        """Force a rolling restart by bumping the deploymentTimestamp label."""
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'],
                        newline_after=True)
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            raise HokusaiError("Refresh failed!")

    @property
    def names(self):
        # Names of all cached deployments.
        return [deployment['metadata']['name'] for deployment in self.cache]
class Deployment(object):
    """Manages rollout of the project's Kubernetes Deployments for a context."""

    def __init__(self, context, deployment_name=None, namespace=None):
        # Cache either the single named deployment or every deployment
        # labeled as this project's application layer.
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            self.cache = [
                self.kctl.get_object("deployment %s" % deployment_name)
            ]
        else:
            self.cache = self.kctl.get_objects(
                'deployment',
                selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, resolve_tag_sha1=True):
        """Roll `tag` out to the cached deployments, retagging ECR/git first.

        Raises:
            HokusaiError: if the repo/tag cannot be resolved, a deploy hook
                fails, or the rollout fails.
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        if resolve_tag_sha1:
            tag = self.ecr.find_git_sha1_image_tag(tag)
            if tag is None:
                # NOTE(review): `tag` has already been overwritten with None
                # here, so this message prints 'None' rather than the tag the
                # caller passed -- confirm whether that is intended.
                raise HokusaiError(
                    "Could not find a git SHA1 for tag %s. Aborting." % tag)
        if self.namespace is None:
            print_green("Deploying %s to %s..." % (tag, self.context))
        else:
            print_green("Deploying %s to %s/%s..." %
                        (tag, self.context, self.namespace))
        # Retag ECR and push git tags only for namespace-less (staging /
        # production) deploys.
        if self.namespace is None:
            self.ecr.retag(tag, self.context)
            print_green("Updated tag %s -> %s" % (tag, self.context))
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            self.ecr.retag(tag, deployment_tag)
            print_green("Updated tag %s -> %s" % (tag, deployment_tag))
            if git_remote is not None:
                print_green("Pushing deployment tags to %s..." % git_remote)
                # NOTE(review): `git tag -f <context>` / `git tag <tag>` tag
                # the current HEAD, not the commit identified by `tag` --
                # verify HEAD is guaranteed to match the deployed SHA here.
                shout("git tag -f %s" % self.context, print_output=True)
                shout("git tag %s" % deployment_tag, print_output=True)
                shout("git push --force %s --tags" % git_remote,
                      print_output=True)
        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                tag, config.pre_deploy, constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)
        # Patch every cached deployment to the new image and bump the
        # deploymentTimestamp label to force a rollout.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_names = [container['name'] for container in containers]
            deployment_targets = [{
                "name": name,
                "image": "%s:%s" % (self.ecr.project_repo, tag)
            } for name in container_names]
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        },
                        "spec": {
                            "containers": deployment_targets
                        }
                    }
                }
            }
            print_green("Patching deployment %s..." %
                        deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for rollout to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Deployment failed!", return_code=return_code)
        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s'..." % config.post_deploy)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                tag, config.post_deploy, constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Post-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

    def refresh(self):
        """Force a rolling restart by bumping the deploymentTimestamp label."""
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Refresh failed!", return_code=return_code)

    @property
    def names(self):
        # Names of all cached deployments.
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """Return the single image tag shared by all cached deployments.

        Raises:
            HokusaiError: if containers or deployments reference differing tags.
        """
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_names = [container['name'] for container in containers]
            container_images = [container['image'] for container in containers]
            if not all(x == container_images[0] for x in container_images):
                raise HokusaiError(
                    "Deployment's containers do not reference the same image tag. Aborting."
                )
            images.append(containers[0]['image'])
        if not all(y == images[0] for y in images):
            raise HokusaiError(
                "Deployments do not reference the same image tag. Aborting.")
        return images[0].rsplit(':', 1)[1]
class CommandRunner(object):
    """Runs a one-off command in a throwaway pod built from the project image."""

    def __init__(self, context):
        self.context = context
        self.kctl = Kubectl(self.context)

    def run(self, image_tag, cmd, tty=False, env=(), constraint=()):
        """Launch `cmd` in a one-off pod.

        Args:
            image_tag: ECR image tag to run.
            cmd: shell command string, split on single spaces into args.
            tty: attach an interactive TTY when True.
            env: iterable of 'KEY=VALUE' environment variable strings.
            constraint: iterable of 'key=value' node selector labels.

        Returns:
            kubectl's exit status for non-TTY runs; None for TTY runs.

        Raises:
            HokusaiError: when an env var or node selector is malformed.
        """
        import re      # stdlib; sanitizes $USER into a valid name fragment
        import shlex   # stdlib; shell-quotes the JSON overrides
        if os.environ.get('USER') is not None:
            # fix: sanitize $USER -- Kubernetes names must match
            # '[a-z0-9]([-a-z0-9]*[a-z0-9])?', but $USER may contain
            # uppercase letters, dots, etc. which made pod creation fail.
            user = re.sub("[^0-9a-z]+", "-", os.environ.get('USER').lower())
            uuid = "%s-%s" % (user, k8s_uuid())
        else:
            uuid = k8s_uuid()
        name = "%s-hokusai-run-%s" % (config.project_name, uuid)
        image_name = "%s:%s" % (config.aws_ecr_registry, image_tag)
        container = {
            "args": cmd.split(' '),
            "name": name,
            "image": image_name,
            "imagePullPolicy": "Always",
            'envFrom': [{
                'configMapRef': {
                    'name': "%s-environment" % config.project_name
                }
            }]
        }
        if tty:
            container.update({"stdin": True, "stdinOnce": True, "tty": True})
        if env:
            container['env'] = []
            for s in env:
                if '=' not in s:
                    raise HokusaiError(
                        "Error: environment variables must be of the form 'KEY=VALUE'"
                    )
                split = s.split('=', 1)
                container['env'].append({'name': split[0], 'value': split[1]})
        spec = {"containers": [container]}
        if constraint:
            spec['nodeSelector'] = {}
            for label in constraint:
                if '=' not in label:
                    raise HokusaiError(
                        "Error: Node selectors must of the form 'key=value'")
                split = label.split('=', 1)
                spec['nodeSelector'][split[0]] = split[1]
        overrides = {"apiVersion": "v1", "spec": spec}
        # fix: shell-quote the JSON instead of wrapping it in literal single
        # quotes -- a single quote inside cmd/env broke the command line
        # (and allowed shell injection via the overrides payload).
        quoted_overrides = shlex.quote(json.dumps(overrides))
        if tty:
            shout(self.kctl.command(
                "run %s -t -i --image=%s --restart=Never --overrides=%s --rm"
                % (name, image_name, quoted_overrides)),
                print_output=True)
        else:
            return returncode(
                self.kctl.command(
                    "run %s --attach --image=%s --overrides=%s --restart=Never --rm"
                    % (name, image_name, quoted_overrides)))
def __init__(self, context):
    """Bind a kubectl client to the given context."""
    self.context = context
    self.kctl = Kubectl(self.context)
class Deployment(object):
    """Manages rollout of the project's Kubernetes Deployments for a context."""

    def __init__(self, context):
        self.context = context
        self.kctl = Kubectl(self.context)
        # All deployments labeled as this project's application layer.
        self.cache = self.kctl.get_object('deployment',
                                          selector="app=%s,layer=application" %
                                          config.project_name)

    def update(self, tag, constraint):
        """Deploy `tag` to every cached deployment, running deploy hooks.

        Raises:
            HokusaiError: if a deploy hook or the rollout fails.
        """
        print_green("Deploying %s to %s..." % (tag, self.context))
        # Retag ECR only when the tag is not already the context tag itself.
        if self.context != tag:
            ecr = ECR()
            ecr.retag(tag, self.context)
            print_green("Updated tag %s -> %s" % (tag, self.context))
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            ecr.retag(tag, deployment_tag)
            print_green("Updated tag %s -> %s" % (tag, deployment_tag))
        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s' on %s..." %
                        (config.pre_deploy, self.context))
            return_code = CommandRunner(self.context).run(
                tag, config.pre_deploy, constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)
        # Patch every cached deployment to the new image and bump the
        # deploymentTimestamp label to force a rollout.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_names = [container['name'] for container in containers]
            deployment_targets = [{
                "name": name,
                "image": "%s:%s" % (config.aws_ecr_registry, tag)
            } for name in container_names]
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        },
                        "spec": {
                            "containers": deployment_targets
                        }
                    }
                }
            }
            print_green("Patching deployment %s..." %
                        deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for rollout to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Deployment failed!", return_code=return_code)
        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s' on %s..." %
                        (config.post_deploy, self.context))
            return_code = CommandRunner(self.context).run(
                tag, config.post_deploy, constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Post-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

    def refresh(self):
        """Force a rolling restart by bumping the deploymentTimestamp label."""
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Refresh failed!", return_code=return_code)

    def history(self, deployment_name):
        """Return the named deployment's replicasets ordered by revision."""
        replicasets = self.kctl.get_object(
            'replicaset',
            selector="app=%s,layer=application" % config.project_name)
        replicasets = filter(
            lambda rs: rs['metadata']['ownerReferences'][0]['name'] ==
            deployment_name, replicasets)
        return sorted(replicasets,
                      key=lambda rs: int(rs['metadata']['annotations'][
                          'deployment.kubernetes.io/revision']))

    @property
    def names(self):
        # Names of all cached deployments.
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """Return the single image tag shared by all cached deployments.

        Raises:
            HokusaiError: if containers or deployments reference differing tags.
        """
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_images = [container['image'] for container in containers]
            if not all(x == container_images[0] for x in container_images):
                # fix: previously raised HokusaiError(..., return_code=return_code)
                # where `return_code` was never defined in this scope, so the
                # intended error was masked by a NameError.
                raise HokusaiError(
                    "Deployment's containers do not reference the same image tag")
            images.append(containers[0]['image'])
        if not all(y == images[0] for y in images):
            raise HokusaiError(
                "Deployments do not reference the same image tag")
        return images[0].rsplit(':', 1)[1]
class Deployment(object):
    """Wraps kubectl access to the project's Kubernetes deployment(s):
    deploy a new image tag (with rollback on timeout), force a refresh,
    and inspect deployment names and the currently rolled-out tag."""

    def __init__(self, context, deployment_name=None, namespace=None):
        """Cache either the single named deployment, or every deployment
        labeled app=<project>,layer=application, in *context*/*namespace*."""
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            self.cache = [
                self.kctl.get_object("deployment %s" % deployment_name)
            ]
        else:
            self.cache = self.kctl.get_objects(
                'deployment',
                selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, timeout, resolve_tag_sha1=True):
        """Roll out *tag* to every cached deployment.

        Runs the configured pre-deploy hook, patches each deployment's
        project-repo containers to the new image, waits on the rollouts
        (rolling back all of them if any times out), runs the post-deploy
        hook, and — when no namespace override is in play — retags ECR and
        pushes Git deployment tags.

        Raises HokusaiError on missing repo/tag, pre-deploy failure,
        rollout failure, or any failed post-deploy step.
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        if resolve_tag_sha1:
            # BUG FIX: previously this assigned the lookup result straight
            # into `tag`, so the error message below always printed
            # "tag None" instead of the tag the caller asked for.
            resolved_tag = self.ecr.find_git_sha1_image_tag(tag)
            if resolved_tag is None:
                raise HokusaiError(
                    "Could not find a git SHA1 for tag %s. Aborting." % tag)
            tag = resolved_tag

        if self.namespace is None:
            print_green("Deploying %s to %s..." % (tag, self.context),
                        newline_after=True)
        else:
            print_green("Deploying %s to %s/%s..."
                        % (tag, self.context, self.namespace),
                        newline_after=True)

        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy,
                        newline_after=True)
            return_code = CommandRunner(
                self.context, namespace=self.namespace).run(
                    tag, config.pre_deploy, constraint=constraint, tty=False)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

        # NOTE(review): strftime("%s") is a platform extension (epoch
        # seconds); used here only as an opaque uniqueness token.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            containers = [(container['name'], container['image'])
                          for container in deployment['spec']['template']
                          ['spec']['containers']]
            # Only retarget containers whose image lives in the project's
            # ECR repo; sidecars from other repos are left untouched.
            deployment_targets = [{
                "name": name,
                "image": "%s:%s" % (self.ecr.project_repo, tag)
            } for name, image in containers if self.ecr.project_repo in image]
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        },
                        "spec": {
                            "containers": deployment_targets
                        }
                    },
                    "progressDeadlineSeconds": timeout
                }
            }
            print_green("Patching deployment %s..."
                        % deployment['metadata']['name'], newline_after=True)
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'"
                    % (deployment['metadata']['name'], json.dumps(patch))))

        # Wait on all rollouts concurrently; if any fails, undo all of them.
        print_green("Waiting for deployment rollouts to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s"
                              % deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            print_red(
                "One or more deployment rollouts timed out! Rolling back...",
                newline_before=True, newline_after=True)
            rollback_commands = [
                self.kctl.command("rollout undo deployment/%s"
                                  % deployment['metadata']['name'])
                for deployment in self.cache
            ]
            shout_concurrent(rollback_commands, print_output=True)
            raise HokusaiError("Deployment failed!")

        # Post-deploy steps are best-effort: warn and remember the failure,
        # because the new tag is already live at this point.
        post_deploy_success = True
        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s'..." % config.post_deploy,
                        newline_after=True)
            return_code = CommandRunner(
                self.context, namespace=self.namespace).run(
                    tag, config.post_deploy, constraint=constraint, tty=False)
            if return_code:
                print_yellow(
                    "WARNING: Running the post-deploy hook failed with return code %s"
                    % return_code, newline_before=True, newline_after=True)
                print_yellow(
                    "The tag %s has been rolled out. However, you should run the post-deploy hook '%s' manually, or re-run this deployment."
                    % (tag, config.post_deploy), newline_after=True)
                post_deploy_success = False

        if self.namespace is None:
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            print_green("Updating ECR deployment tags in %s..."
                        % self.ecr.project_repo, newline_after=True)
            try:
                self.ecr.retag(tag, self.context)
                print_green("Updated ECR tag %s -> %s" % (tag, self.context))
                self.ecr.retag(tag, deployment_tag)
                print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag),
                            newline_after=True)
            except (ValueError, ClientError) as e:
                print_yellow(
                    "WARNING: Updating ECR deployment tags failed due to the error: '%s'"
                    % str(e), newline_before=True, newline_after=True)
                print_yellow(
                    "The tag %s has been rolled out. However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment."
                    % (tag, deployment_tag, self.context), newline_after=True)
                post_deploy_success = False

            remote = git_remote or config.git_remote
            if remote is not None:
                print_green("Pushing Git deployment tags to %s..." % remote,
                            newline_after=True)
                try:
                    shout("git fetch %s" % remote)
                    shout("git tag -f %s %s" % (self.context, tag),
                          print_output=True)
                    shout("git tag -f %s %s" % (deployment_tag, tag),
                          print_output=True)
                    shout("git push -f --no-verify %s refs/tags/%s"
                          % (remote, self.context), print_output=True)
                    print_green("Updated Git tag %s -> %s" % (tag, self.context))
                    shout("git push -f --no-verify %s refs/tags/%s"
                          % (remote, deployment_tag), print_output=True)
                    print_green("Updated Git tag %s -> %s"
                                % (tag, deployment_tag), newline_after=True)
                except CalledProcessError as e:
                    print_yellow(
                        "WARNING: Creating Git deployment tags failed due to the error: '%s'"
                        % str(e), newline_before=True, newline_after=True)
                    print_yellow(
                        "The tag %s has been rolled out. However, you should create the Git tags '%s' and '%s' manually, or re-run this deployment."
                        % (tag, deployment_tag, self.context),
                        newline_after=True)
                    post_deploy_success = False

        if post_deploy_success:
            print_green("Deployment succeeded!")
        else:
            raise HokusaiError("One or more post-deploy steps failed!")

    def refresh(self):
        """Patch a fresh ``deploymentTimestamp`` label into each cached
        deployment's pod template (causing Kubernetes to roll new pods)
        and wait for the resulting rollouts to complete.

        Raises HokusaiError if any rollout status command fails.
        """
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..."
                        % deployment['metadata']['name'], newline_after=True)
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'"
                    % (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s"
                              % deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            raise HokusaiError("Refresh failed!")

    @property
    def names(self):
        """Names of all cached deployment objects."""
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """Return the image tag (text after the last ':') shared by every
        project-repo container of every cached deployment.

        Raises HokusaiError when a deployment has no project-repo
        containers, or when containers within a deployment or across
        deployments reference differing images.
        """
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            # Ignore sidecars whose images come from other repositories.
            container_images = [
                container['image'] for container in containers
                if self.ecr.project_repo in container['image']
            ]
            if not container_images:
                raise HokusaiError(
                    "Deployment has no valid target containers. Aborting.")
            if not all(x == container_images[0] for x in container_images):
                raise HokusaiError(
                    "Deployment's containers do not reference the same image tag. Aborting."
                )
            images.append(container_images[0])
        if not all(y == images[0] for y in images):
            raise HokusaiError(
                "Deployments do not reference the same image tag. Aborting.")
        return images[0].rsplit(':', 1)[1]
def __init__(self, context):
    """Bind a kubectl client to *context* and cache the project's
    application-layer deployment object up front."""
    self.context = context
    self.kctl = Kubectl(context)
    selector = "app=%s,layer=application" % config.project_name
    self.cache = self.kctl.get_object('deployment', selector=selector)
# Check the local Docker Compose YAMLs used by development and test runs.
if os.path.isfile(os.path.join(os.getcwd(), 'hokusai/development.yml')):
    check_ok('./hokusai/development.yml')
else:
    check_err('./hokusai/development.yml')
    return_code += 1

if os.path.isfile(os.path.join(os.getcwd(), 'hokusai/test.yml')):
    check_ok('./hokusai/test.yml')
else:
    check_err('./hokusai/test.yml')
    return_code += 1

# For each remote environment, check both the kubectl context and its YAML.
for context in ['staging', 'production']:
    try:
        # BUG FIX: previously this queried Kubectl('staging') for every
        # iteration; use the context actually under test.
        if context in Kubectl(context).contexts():
            check_ok("kubectl context '%s'" % context)
        else:
            check_err("kubectl context '%s'" % context)
            return_code += 1
        if os.path.isfile(
                os.path.join(os.getcwd(), "hokusai/%s.yml" % context)):
            check_ok("./hokusai/%s.yml" % context)
        else:
            check_err("./hokusai/%s.yml" % context)
            return_code += 1
    except CalledProcessError:
        # Listing kubectl contexts itself failed for this environment.
        check_err('%s context' % context)
        return_code += 1