def k8s_status(context, resources, pods, describe, top, namespace=None, yaml_file_name=None):
  """Print status for the context's Kubernetes spec.

  Runs `kubectl describe` (when describe is truthy) or `kubectl get -o wide`
  against the spec file, the application pods, and `kubectl top` output,
  gated by the resources / pods / top flags.
  """
  spec_name = context if yaml_file_name is None else yaml_file_name
  kubernetes_yml = os.path.join(CWD, "%s/%s.yml" % (HOKUSAI_CONFIG_DIR, spec_name))
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  kctl = Kubectl(context, namespace=namespace)
  # `describe` output has no -o flag; `get` adds wide output
  kctl_cmd, output = ("describe", "") if describe else ("get", " -o wide")
  if resources:
    print_green("Resources", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s -f %s%s" % (kctl_cmd, kubernetes_yml, output)), print_output=True)
  if pods:
    print_green("Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (kctl_cmd, config.project_name, output)), print_output=True)
  if top:
    print_green("Top Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def k8s_status(context, resources, pods, describe, top, namespace=None, filename=None):
  """Print status for the context's rendered Kubernetes spec.

  Resolves the template via TemplateSelector (from the hokusai config dir
  when filename is None), renders it with YamlSpec, then shows describe/get
  output for the spec, the application pods, and `kubectl top`.
  """
  if filename is None:
    template_path = os.path.join(CWD, HOKUSAI_CONFIG_DIR, context)
  else:
    template_path = filename
  yaml_template = TemplateSelector().get(template_path)
  kctl = Kubectl(context, namespace=namespace)
  yaml_spec = YamlSpec(yaml_template).to_file()
  # `describe` output has no -o flag; `get` adds wide output
  kctl_cmd, output = ("describe", "") if describe else ("get", " -o wide")
  if resources:
    print_green("Resources", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s -f %s%s" % (kctl_cmd, yaml_spec, output)), print_output=True)
  if pods:
    print_green("Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (kctl_cmd, config.project_name, output)), print_output=True)
  if top:
    print_green("Top Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def k8s_update(context, namespace=None, filename=None, check_branch="master", check_remote=None, skip_checks=False, dry_run=False):
  """Apply the context's rendered Kubernetes spec with `kubectl apply`.

  Unless skip_checks is set, verifies the working copy is on check_branch
  and not divergent from each remote (check_remote, or all remotes).

  Raises:
    HokusaiError: when branch/remote checks fail.
  """
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    yaml_template = TemplateSelector().get(filename)
  if not skip_checks:
    current_branch = None
    for branchname in shout('git branch').splitlines():
      if '* ' in branchname:
        current_branch = branchname.replace('* ', '')
        break
    # Fix: if no '* ' line was found, current_branch stayed None and the
    # 'detached' containment check raised TypeError instead of a clear error.
    if current_branch is None or 'detached' in current_branch:
      raise HokusaiError("Not on any branch! Aborting.")
    if current_branch != check_branch:
      raise HokusaiError("Not on %s branch! Aborting." % check_branch)
    remotes = [check_remote] if check_remote else shout('git remote').splitlines()
    for remote in remotes:
      shout("git fetch %s" % remote)
      # `git diff --quiet` exits non-zero when the trees differ
      if returncode("git diff --quiet %s/%s" % (remote, current_branch)):
        raise HokusaiError("Local branch %s is divergent from %s/%s. Aborting." % (current_branch, remote, current_branch))
  kctl = Kubectl(context, namespace=namespace)
  yaml_spec = YamlSpec(yaml_template).to_file()
  if dry_run:
    shout(kctl.command("apply -f %s --dry-run" % yaml_spec), print_output=True)
    print_green("Updated Kubernetes environment %s (dry run)" % yaml_template)
  else:
    shout(kctl.command("apply -f %s" % yaml_spec), print_output=True)
    print_green("Updated Kubernetes environment %s" % yaml_template)
class ConfigMap(object):
  """Manages the <project>-environment ConfigMap for a kubectl context."""

  def __init__(self, context, namespace=None):
    self.context = context
    self.kctl = Kubectl(context, namespace=namespace)
    # OrderedDict keeps a stable key order in the dumped YAML
    self.struct = OrderedDict([
      ('apiVersion', 'v1'),
      ('kind', 'ConfigMap'),
      ('metadata', {
        'labels': {'app': config.project_name},
        'name': "%s-environment" % config.project_name
      }),
      ('data', {})
    ])

  def _to_file(self):
    """Dump the struct as YAML to a temp file; caller must unlink it."""
    f = NamedTemporaryFile(delete=False)
    f.write(yaml.safe_dump(self.struct, default_flow_style=False))
    f.close()
    return f.name

  def create(self):
    """Create the configmap in the cluster."""
    f = self._to_file()
    try:
      shout(self.kctl.command("create -f %s" % f))
    finally:
      os.unlink(f)

  def destroy(self):
    """Delete the configmap from the cluster."""
    shout(self.kctl.command("delete configmap %s-environment" % config.project_name))

  def load(self):
    """Refresh self.struct['data'] from the cluster's copy."""
    payload = shout(self.kctl.command("get configmap %s-environment -o yaml" % config.project_name))
    # Fix: yaml.load without an explicit Loader is unsafe and deprecated
    struct = yaml.safe_load(payload)
    if 'data' in struct:
      self.struct['data'] = struct['data']
    else:
      self.struct['data'] = {}

  def save(self):
    """Apply local data to the cluster's configmap."""
    f = self._to_file()
    try:
      shout(self.kctl.command("apply -f %s" % f))
    finally:
      os.unlink(f)

  def all(self):
    """Yield (key, value) pairs of the configmap data."""
    # Fix: iteritems() is Python 2 only; items() works on both 2 and 3
    for k, v in self.struct['data'].items():
      yield k, v

  def update(self, key, value):
    """Set key to value in the local data."""
    self.struct['data'].update({key: value})

  def delete(self, key):
    """Remove key from the local data.

    Raises:
      HokusaiError: when the key is not present.
    """
    try:
      del self.struct['data'][key]
    except KeyError:
      raise HokusaiError("Cannot unset '%s' as it does not exist" % key)
class CommandRunner(object):
  """Runs a one-off command in the cluster as a bare pod via `kubectl run`."""

  def __init__(self, context, namespace=None):
    self.context = context
    self.kctl = Kubectl(self.context, namespace=namespace)
    self.ecr = ECR()

  def run(self, image_tag, cmd, tty=None, env=(), constraint=()):
    """Launch `cmd` in a throwaway pod using the project image at image_tag.

    In TTY mode attaches interactively; otherwise returns the pod's exit code.
    """
    if not self.ecr.project_repo_exists():
      raise HokusaiError("Project repo does not exist. Aborting.")
    # Prefix the pod name with the local user when available, for traceability
    if os.environ.get('USER') is not None:
      uuid = "%s-%s" % (os.environ.get('USER'), k8s_uuid())
    else:
      uuid = k8s_uuid()
    name = "%s-hokusai-run-%s" % (config.project_name, uuid)
    image_name = "%s:%s" % (self.ecr.project_repo, image_tag)
    container = {
      "args": cmd.split(' '),
      "name": name,
      "image": image_name,
      "imagePullPolicy": "Always",
      'envFrom': [{'configMapRef': {'name': "%s-environment" % config.project_name}}]
    }
    # CLI flag wins over the project config default
    run_tty = config.run_tty if tty is None else tty
    if run_tty:
      container["stdin"] = True
      container["stdinOnce"] = True
      container["tty"] = True
    if env:
      extra_env = []
      for pair in env:
        if '=' not in pair:
          raise HokusaiError("Error: environment variables must be of the form 'KEY=VALUE'")
        key, value = pair.split('=', 1)
        extra_env.append({'name': key, 'value': value})
      container['env'] = extra_env
    spec = {"containers": [container]}
    constraints = constraint or config.run_constraints
    if constraints:
      spec['nodeSelector'] = {}
      for label in constraints:
        if '=' not in label:
          raise HokusaiError("Error: Node selectors must of the form 'key=value'")
        key, value = label.split('=', 1)
        spec['nodeSelector'][key] = value
    overrides = {"apiVersion": "v1", "spec": spec}
    if run_tty:
      shout(self.kctl.command("run %s -t -i --image=%s --restart=Never --overrides=%s --rm" % (name, image_name, pipes.quote(json.dumps(overrides)))), print_output=True)
    else:
      return returncode(self.kctl.command("run %s --attach --image=%s --overrides=%s --restart=Never --rm" % (name, image_name, pipes.quote(json.dumps(overrides)))))
class ConfigMap(object):
  """Manages a ConfigMap (by default <project>-environment) as JSON."""

  def __init__(self, context, namespace='default', name=None):
    self.context = context
    self.kctl = Kubectl(context, namespace=namespace)
    self.name = name or "%s-environment" % config.project_name
    self.metadata = {'name': self.name, 'namespace': namespace}
    # Only the default project configmap carries the app label
    if name is None:
      self.metadata['labels'] = {'app': config.project_name}
    self.struct = {
      'apiVersion': 'v1',
      'kind': 'ConfigMap',
      'metadata': self.metadata,
      'data': {}
    }

  def _to_file(self):
    """Dump the struct as JSON to a temp file; caller must unlink f.name."""
    tmp = NamedTemporaryFile(delete=False)
    tmp.write(json.dumps(self.struct))
    tmp.close()
    return tmp

  def create(self):
    """Create the configmap in the cluster."""
    tmp = self._to_file()
    try:
      shout(self.kctl.command("create -f %s" % tmp.name))
    finally:
      os.unlink(tmp.name)

  def destroy(self):
    """Delete the configmap from the cluster."""
    shout(self.kctl.command("delete configmap %s" % self.name))

  def load(self):
    """Refresh self.struct['data'] from the cluster's copy."""
    payload = shout(self.kctl.command("get configmap %s -o json" % self.name))
    remote = json.loads(payload)
    self.struct['data'] = remote['data'] if 'data' in remote else {}

  def save(self):
    """Apply local data to the cluster's configmap."""
    tmp = self._to_file()
    try:
      shout(self.kctl.command("apply -f %s" % tmp.name))
    finally:
      os.unlink(tmp.name)

  def all(self):
    """Return the data dict."""
    return self.struct['data']

  def update(self, key, value):
    """Set key to value in the local data."""
    self.struct['data'].update({key: value})

  def delete(self, key):
    """Remove key from the local data; raise HokusaiError if absent."""
    try:
      del self.struct['data'][key]
    except KeyError:
      raise HokusaiError("Cannot unset '%s' as it does not exist" % key)
def k8s_create(context, tag='latest', namespace=None, filename=None):
  """Create the Kubernetes environment for a context from its yml spec.

  Verifies the ECR repo and image tag exist, mirrors 'latest' to the
  context tag when needed, creates the project configmap (canonical spec
  only), then runs `kubectl create --save-config`.

  Raises:
    HokusaiError: when the spec file, ECR repo or image tag is missing.
  """
  if filename is None:
    kubernetes_yml = os.path.join(CWD, HOKUSAI_CONFIG_DIR, "%s.yml" % context)
  else:
    kubernetes_yml = filename
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  ecr = ECR()
  if not ecr.project_repo_exists():
    raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
  if not ecr.tag_exists(tag):
    raise HokusaiError("Image tag %s does not exist... did you run `hokusai registry push`?" % tag)
  # Fix: `tag is 'latest'` compared identity, not equality, which is
  # implementation-dependent for string literals; use == instead.
  if tag == 'latest' and not ecr.tag_exists(context):
    ecr.retag(tag, context)
    print_green("Updated tag 'latest' -> %s" % context)
  # Only the canonical (non-filename) spec owns the project configmap
  if filename is None:
    configmap = ConfigMap(context, namespace=namespace)
    configmap.create()
    print_green("Created configmap %s-environment" % config.project_name)
  kctl = Kubectl(context, namespace=namespace)
  shout(kctl.command("create --save-config -f %s" % kubernetes_yml), print_output=True)
  print_green("Created Kubernetes environment %s" % kubernetes_yml)
def k8s_create(context, tag='latest', namespace=None, filename=None, environment=()):
  """Create the Kubernetes environment for a context from its template.

  Verifies the ECR repo and image tag exist, mirrors 'latest' to the
  context tag when needed, seeds and creates the project configmap
  (canonical spec only), then runs `kubectl create --save-config` on the
  rendered spec.
  """
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    yaml_template = TemplateSelector().get(filename)
  ecr = ECR()
  if not ecr.project_repo_exists():
    raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
  if not ecr.tag_exists(tag):
    raise HokusaiError("Image tag %s does not exist... did you run `hokusai registry push`?" % tag)
  if tag == 'latest' and not ecr.tag_exists(context):
    ecr.retag(tag, context)
    print_green("Updated tag 'latest' -> %s" % context)
  # Only the canonical (non-filename) spec owns the project configmap
  if filename is None:
    configmap = ConfigMap(context, namespace=namespace)
    for pair in environment:
      if '=' not in pair:
        raise HokusaiError("Error: environment variables must be of the form 'KEY=VALUE'")
      key, value = pair.split('=', 1)
      configmap.update(key, value)
    configmap.create()
    print_green("Created configmap %s-environment" % config.project_name)
  kctl = Kubectl(context, namespace=namespace)
  yaml_spec = YamlSpec(yaml_template).to_file()
  shout(kctl.command("create --save-config -f %s" % yaml_spec), print_output=True)
  print_green("Created Kubernetes environment %s" % yaml_template)
def logs(context, timestamps, follow, tail, previous, labels, namespace=None):
  """Stream logs from every container of the project's running pods.

  Builds a `kubectl logs` option string from the flags (falling back to
  config defaults for follow/tail), filters pods by app/layer selectors
  plus any extra label selectors, and tails all containers concurrently.

  Raises:
    HokusaiError: when an extra label selector is not of the form key=value.
  """
  kctl = Kubectl(context, namespace=namespace)
  opts = ''
  if timestamps:
    opts += ' --timestamps'
  if previous:
    opts += ' --previous'
  if follow or config.follow_logs:
    opts += ' --follow'
  if tail or config.tail_logs:
    num_tail = tail if tail else config.tail_logs
    opts += " --tail=%s" % num_tail
  selectors = ["app=%s" % config.project_name, "layer=application"]
  for l in labels:
    if '=' not in l:
      # Fix: error message was missing "must be" and read as broken English
      raise HokusaiError("Error: label selectors must be of the form 'key=value'")
    selectors.append(l)
  pods = kctl.get_objects('pod', selector=(',').join(selectors))
  pods = [pod for pod in pods if pod['status']['phase'] == 'Running']
  containers = []
  for pod in pods:
    for container in pod['spec']['containers']:
      containers.append({'pod': pod['metadata']['name'], 'name': container['name']})
  commands = [kctl.command("logs %s %s%s" % (container['pod'], container['name'], opts)) for container in containers]
  shout_concurrent(commands, print_output=True)
def logs(context, timestamps, follow, tail):
  """Tail logs from every container of the project's running pods."""
  kctl = Kubectl(context)
  opts = ''
  if timestamps:
    opts += ' --timestamps'
  if follow:
    opts += ' --follow'
  if tail:
    opts += " --tail=%s" % tail
  pods = kctl.get_object('pod', selector="app=%s,layer=application" % config.project_name)
  running = [pod for pod in pods if pod['status']['phase'] == 'Running']
  containers = [
    {'pod': pod['metadata']['name'], 'name': container['name']}
    for pod in running
    for container in pod['spec']['containers']
  ]
  commands = [
    kctl.command("logs %s %s%s" % (container['pod'], container['name'], opts))
    for container in containers
  ]
  return shout_concurrent(commands, print_output=True)
def k8s_create(context, tag='latest', namespace=None, yaml_file_name=None):
  """Create the Kubernetes environment for a context from hokusai/<name>.yml.

  Verifies the ECR repo and image tag exist, mirrors 'latest' to the
  context tag when needed, then runs `kubectl create --save-config`.

  Raises:
    HokusaiError: when the spec file, ECR repo or image tag is missing.
  """
  if yaml_file_name is None:
    yaml_file_name = context
  kubernetes_yml = os.path.join(os.getcwd(), "hokusai/%s.yml" % yaml_file_name)
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  ecr = ECR()
  if not ecr.project_repo_exists():
    raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
  if not ecr.tag_exists(tag):
    raise HokusaiError("Image tag %s does not exist... did you run `hokusai push`?" % tag)
  # Fix: `tag is 'latest'` compared identity, not equality, which is
  # implementation-dependent for string literals; use == instead.
  if tag == 'latest' and not ecr.tag_exists(context):
    ecr.retag(tag, context)
    print_green("Updated tag 'latest' -> %s" % context)
  kctl = Kubectl(context, namespace=namespace)
  shout(kctl.command("create --save-config -f %s" % kubernetes_yml), print_output=True)
  print_green("Created Kubernetes environment %s" % kubernetes_yml)
def k8s_update(context, namespace=None, yaml_file_name=None):
  """Apply hokusai/<name>.yml (defaulting to the context's spec) via kubectl."""
  spec_name = context if yaml_file_name is None else yaml_file_name
  kubernetes_yml = os.path.join(os.getcwd(), "hokusai/%s.yml" % spec_name)
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  kctl = Kubectl(context, namespace=namespace)
  shout(kctl.command("apply -f %s" % kubernetes_yml), print_output=True)
  print_green("Updated Kubernetes environment %s" % kubernetes_yml)
def k8s_delete(context, namespace=None, yaml_file_name=None):
  """Delete the Kubernetes environment defined by the context's yml spec."""
  spec_name = context if yaml_file_name is None else yaml_file_name
  kubernetes_yml = os.path.join(CWD, "%s/%s.yml" % (HOKUSAI_CONFIG_DIR, spec_name))
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  kctl = Kubectl(context, namespace=namespace)
  shout(kctl.command("delete -f %s" % kubernetes_yml), print_output=True)
  print_green("Deleted Kubernetes environment %s" % kubernetes_yml)
def environment_delete(context):
  """Delete the remote environment defined by hokusai/<context>.yml."""
  kubernetes_yml = os.path.join(os.getcwd(), "hokusai/%s.yml" % context)
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist for given context." % kubernetes_yml)
  kctl = Kubectl(context)
  shout(kctl.command("delete -f %s" % kubernetes_yml), print_output=True)
  print_green("Deleted remote environment %s" % context)
def environment_status(context):
  """Print wide `kubectl get` output for the project's deployments, services and pods."""
  kctl = Kubectl(context)
  # (section header, kubectl object type) pairs, printed in order
  sections = (("Deployments", "deployments"), ("Services", "services"), ("Pods", "pods"))
  for header, kind in sections:
    print('')
    print_green(header)
    print_green('-----------------------------------------------------------')
    shout(kctl.command("get %s --selector app=%s -o wide" % (kind, config.project_name)), print_output=True)
def k8s_delete(context, namespace=None, filename=None):
  """Delete the Kubernetes environment rendered from the context's template.

  For the canonical (non-filename) spec, also destroys the project configmap.
  """
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    yaml_template = TemplateSelector().get(filename)
  if filename is None:
    configmap = ConfigMap(context, namespace=namespace)
    configmap.destroy()
    print_green("Deleted configmap %s-environment" % config.project_name)
  kctl = Kubectl(context, namespace=namespace)
  yaml_spec = YamlSpec(yaml_template).to_file()
  shout(kctl.command("delete -f %s" % yaml_spec), print_output=True)
  print_green("Deleted Kubernetes environment %s" % yaml_template)
def k8s_delete(context, namespace=None, filename=None):
  """Delete the Kubernetes environment defined by the context's yml spec.

  For the canonical (non-filename) spec, also destroys the project configmap.
  """
  if filename is None:
    kubernetes_yml = os.path.join(CWD, HOKUSAI_CONFIG_DIR, "%s.yml" % context)
  else:
    kubernetes_yml = filename
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist." % kubernetes_yml)
  if filename is None:
    configmap = ConfigMap(context, namespace=namespace)
    configmap.destroy()
    print_green("Deleted configmap %s-environment" % config.project_name)
  kctl = Kubectl(context, namespace=namespace)
  shout(kctl.command("delete -f %s" % kubernetes_yml), print_output=True)
  print_green("Deleted Kubernetes environment %s" % kubernetes_yml)
def environment_create(context):
  """Create the remote environment defined by hokusai/<context>.yml.

  Verifies the ECR repository and the 'latest' image tag exist, mirrors
  'latest' to the context tag when needed, then runs
  `kubectl create --save-config`.
  """
  kubernetes_yml = os.path.join(os.getcwd(), "hokusai/%s.yml" % context)
  if not os.path.isfile(kubernetes_yml):
    raise HokusaiError("Yaml file %s does not exist for given context." % kubernetes_yml)
  ecr = ECR()
  if not ecr.project_repository_exists():
    raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
  if not ecr.tag_exists('latest'):
    raise HokusaiError("Image tag 'latest' does not exist... did you run `hokusai push`?")
  if not ecr.tag_exists(context):
    ecr.retag('latest', context)
    print_green("Updated tag 'latest' -> %s" % context)
  kctl = Kubectl(context)
  shout(kctl.command("create --save-config -f %s" % kubernetes_yml), print_output=True)
  print_green("Created remote environment %s" % context)
class Deployment(object):
  """Orchestrates deployment rollouts for the project's Kubernetes Deployments.

  The cache holds the Deployment objects to patch/watch: either a single
  named deployment or all deployments matching the app/layer selector.
  """

  def __init__(self, context, deployment_name=None, namespace=None):
    self.context = context
    self.namespace = namespace
    self.kctl = Kubectl(self.context, namespace=namespace)
    self.ecr = ECR()
    if deployment_name:
      self.cache = [self.kctl.get_object("deployment %s" % deployment_name)]
    else:
      self.cache = self.kctl.get_objects('deployment', selector="app=%s,layer=application" % config.project_name)

  def update(self, tag, constraint, git_remote, timeout, update_config=False, filename=None):
    """Roll out the image digest behind `tag` to the cached deployments.

    Runs pre/post-deploy hooks, patches or re-applies the spec, watches the
    rollouts (rolling back all on failure), and for the canonical app pushes
    ECR and Git deployment tags.

    Raises:
      HokusaiError: missing repo/digest, a failed pre-deploy hook, a failed
        rollout, or any failed post-deploy step.
    """
    if not self.ecr.project_repo_exists():
      raise HokusaiError("Project repo does not exist. Aborting.")
    # Deploy by digest, not tag, so the rollout is immutable
    digest = self.ecr.image_digest_for_tag(tag)
    if digest is None:
      raise HokusaiError("Could not find an image digest for tag %s. Aborting." % tag)
    if self.namespace is None:
      print_green("Deploying %s to %s..." % (digest, self.context), newline_after=True)
    else:
      print_green("Deploying %s to %s/%s..." % (digest, self.context, self.namespace), newline_after=True)
    """ This logic should be refactored, but essentially if namespace and filename are provided, the caller is a review app, while if namespace is None it is either staging or production. If filename is unset for staging or production it is targeting the 'canonical' app, i.e. staging.yml or production.yml while if it is set it is trageting a 'canary' app. For the canonical app, run deploy hooks and post-depoy steps creating deployment tags For a canary app, skip deploy hooks and post-deploy steps For review apps, run deploy hooks but skip post-deploy steps For all deployment rollouts, if update_config or filename targets a yml file, bust the deployment cache using k8s field selectors and get deployments to watch the rollout from the yml file spec """
    # Run the pre-deploy hook for the canonical app or a review app
    if config.pre_deploy and (filename is None or (filename and self.namespace)):
      print_green("Running pre-deploy hook '%s'..." % config.pre_deploy, newline_after=True)
      return_code = CommandRunner(self.context, namespace=self.namespace).run(tag, config.pre_deploy, constraint=constraint, tty=False)
      if return_code:
        raise HokusaiError("Pre-deploy hook failed with return code %s" % return_code, return_code=return_code)
    # Patch the deployments
    deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
    if filename is None:
      kubernetes_yml = os.path.join(CWD, HOKUSAI_CONFIG_DIR, "%s.yml" % self.context)
    else:
      kubernetes_yml = filename
    # If a review app, a canary app or the canonical app while updating config,
    # bust the deployment cache and populate deployments from the yaml file
    if filename or update_config:
      self.cache = []
      for item in yaml.safe_load_all(open(kubernetes_yml, 'r')):
        if item['kind'] == 'Deployment':
          self.cache.append(item)
    # If updating config, patch the spec and apply
    if update_config:
      print_green("Patching Deployments in spec %s with image digest %s" % (kubernetes_yml, digest), newline_after=True)
      payload = []
      for item in yaml.safe_load_all(open(kubernetes_yml, 'r')):
        if item['kind'] == 'Deployment':
          # deploymentTimestamp label forces a new ReplicaSet even if the image is unchanged
          item['spec']['template']['metadata']['labels']['deploymentTimestamp'] = deployment_timestamp
          item['spec']['progressDeadlineSeconds'] = timeout
          for container in item['spec']['template']['spec']['containers']:
            # Only retarget containers that run the project image
            if self.ecr.project_repo in container['image']:
              container['image'] = "%s@%s" % (self.ecr.project_repo, digest)
        payload.append(item)
      f = NamedTemporaryFile(delete=False)
      f.write(YAML_HEADER)
      f.write(yaml.safe_dump_all(payload, default_flow_style=False))
      f.close()
      print_green("Applying patched spec %s..." % f.name, newline_after=True)
      try:
        shout(self.kctl.command("apply -f %s" % f.name), print_output=True)
      finally:
        os.unlink(f.name)
    # If not updating config, patch the deployments in the cache and call kubectl patch to update
    else:
      for deployment in self.cache:
        containers = [(container['name'], container['image']) for container in deployment['spec']['template']['spec']['containers']]
        deployment_targets = [{"name": name, "image": "%s@%s" % (self.ecr.project_repo, digest)} for name, image in containers if self.ecr.project_repo in image]
        patch = {
          "spec": {
            "template": {
              "metadata": {
                "labels": {"deploymentTimestamp": deployment_timestamp}
              },
              "spec": {
                "containers": deployment_targets
              }
            },
            "progressDeadlineSeconds": timeout
          }
        }
        print_green("Patching deployment %s..." % deployment['metadata']['name'], newline_after=True)
        shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))
    # Watch the rollouts in the cache and if any fail, roll back
    print_green("Waiting for deployment rollouts to complete...")
    rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
    return_codes = shout_concurrent(rollout_commands, print_output=True)
    if any(return_codes):
      print_red("One or more deployment rollouts failed! Rolling back...", newline_before=True, newline_after=True)
      rollback_commands = [self.kctl.command("rollout undo deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
      shout_concurrent(rollback_commands, print_output=True)
      raise HokusaiError("Deployment failed!")
    post_deploy_success = True
    # Run the post-deploy hook for the canonical app or a review app
    if config.post_deploy and (filename is None or (filename and self.namespace)):
      print_green("Running post-deploy hook '%s'..." % config.post_deploy, newline_after=True)
      return_code = CommandRunner(self.context, namespace=self.namespace).run(tag, config.post_deploy, constraint=constraint, tty=False)
      if return_code:
        # Post-deploy failure does not roll back -- the image is already live
        print_yellow("WARNING: Running the post-deploy hook failed with return code %s" % return_code, newline_before=True, newline_after=True)
        print_yellow("The image digest %s has been rolled out. However, you should run the post-deploy hook '%s' manually, or re-run this deployment." % (digest, config.post_deploy), newline_after=True)
        post_deploy_success = False
    # For the canonical app, create tags
    if filename is None:
      deployment_tag = "%s--%s" % (self.context, datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
      print_green("Updating ECR deployment tags in %s..." % self.ecr.project_repo, newline_after=True)
      try:
        self.ecr.retag(tag, self.context)
        print_green("Updated ECR tag %s -> %s" % (tag, self.context))
        self.ecr.retag(tag, deployment_tag)
        print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag), newline_after=True)
      except (ValueError, ClientError) as e:
        print_yellow("WARNING: Updating ECR deployment tags failed due to the error: '%s'" % str(e), newline_before=True, newline_after=True)
        print_yellow("The tag %s has been rolled out. However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment." % (tag, deployment_tag, self.context), newline_after=True)
        post_deploy_success = False
      remote = git_remote or config.git_remote
      if remote:
        print_green("Pushing Git deployment tags to %s..." % remote, newline_after=True)
        try:
          shout("git fetch %s" % remote)
          shout("git tag -f %s %s" % (self.context, tag), print_output=True)
          shout("git tag -f %s %s" % (deployment_tag, tag), print_output=True)
          shout("git push -f --no-verify %s refs/tags/%s" % (remote, self.context), print_output=True)
          print_green("Updated Git tag %s -> %s" % (tag, self.context))
          shout("git push -f --no-verify %s refs/tags/%s" % (remote, deployment_tag), print_output=True)
          print_green("Updated Git tag %s -> %s" % (tag, deployment_tag), newline_after=True)
        except CalledProcessError as e:
          print_yellow("WARNING: Creating Git deployment tags failed due to the error: '%s'" % str(e), newline_before=True, newline_after=True)
          print_yellow("The tag %s has been rolled out. However, you should create the Git tags '%s' and '%s' manually, or re-run this deployment." % (tag, deployment_tag, self.context), newline_after=True)
          post_deploy_success = False
    if post_deploy_success:
      print_green("Deployment succeeded!")
    else:
      raise HokusaiError("One or more post-deploy steps failed!")

  def refresh(self):
    """Force a redeploy of the cached deployments without changing the image.

    Patches a fresh deploymentTimestamp label so Kubernetes spins up new
    ReplicaSets, then waits for the rollouts to complete.

    Raises:
      HokusaiError: when any rollout fails.
    """
    deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
    for deployment in self.cache:
      patch = {
        "spec": {
          "template": {
            "metadata": {
              "labels": {"deploymentTimestamp": deployment_timestamp}
            }
          }
        }
      }
      print_green("Refreshing %s..." % deployment['metadata']['name'], newline_after=True)
      shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))
    print_green("Waiting for refresh to complete...")
    rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
    return_codes = shout_concurrent(rollout_commands, print_output=True)
    if any(return_codes):
      raise HokusaiError("Refresh failed!")

  @property
  def names(self):
    """Names of the cached deployments."""
    return [deployment['metadata']['name'] for deployment in self.cache]
class Deployment(object):
  """Orchestrates deployment rollouts for the project's Kubernetes Deployments."""

  def __init__(self, context):
    self.context = context
    self.kctl = Kubectl(self.context)
    # Cache the Deployment objects matching the app/layer selector
    self.cache = self.kctl.get_object('deployment', selector="app=%s,layer=application" % config.project_name)

  def update(self, tag, constraint):
    """Roll out image `tag` to the cached deployments.

    Retags ECR (context tag plus a timestamped deployment tag), runs the
    pre-deploy hook, patches each deployment's containers and
    deploymentTimestamp label, waits for the rollouts, then runs the
    post-deploy hook.

    Raises:
      HokusaiError: when a hook fails or a rollout fails.
    """
    print_green("Deploying %s to %s..." % (tag, self.context))
    if self.context != tag:
      ecr = ECR()
      ecr.retag(tag, self.context)
      print_green("Updated tag %s -> %s" % (tag, self.context))
      deployment_tag = "%s--%s" % (self.context, datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
      ecr.retag(tag, deployment_tag)
      print_green("Updated tag %s -> %s" % (tag, deployment_tag))
    if config.pre_deploy is not None:
      print_green("Running pre-deploy hook '%s' on %s..." % (config.pre_deploy, self.context))
      return_code = CommandRunner(self.context).run(tag, config.pre_deploy, constraint=constraint)
      if return_code:
        raise HokusaiError("Pre-deploy hook failed with return code %s" % return_code, return_code=return_code)
    deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
    for deployment in self.cache:
      containers = deployment['spec']['template']['spec']['containers']
      container_names = [container['name'] for container in containers]
      deployment_targets = [{"name": name, "image": "%s:%s" % (config.aws_ecr_registry, tag)} for name in container_names]
      patch = {
        "spec": {
          "template": {
            "metadata": {
              # New label value forces a new ReplicaSet even if the image is unchanged
              "labels": {"deploymentTimestamp": deployment_timestamp}
            },
            "spec": {
              "containers": deployment_targets
            }
          }
        }
      }
      print_green("Patching deployment %s..." % deployment['metadata']['name'])
      shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))
    print_green("Waiting for rollout to complete...")
    rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
    return_code = shout_concurrent(rollout_commands)
    if return_code:
      raise HokusaiError("Deployment failed!", return_code=return_code)
    if config.post_deploy is not None:
      print_green("Running post-deploy hook '%s' on %s..." % (config.post_deploy, self.context))
      return_code = CommandRunner(self.context).run(tag, config.post_deploy, constraint=constraint)
      if return_code:
        raise HokusaiError("Post-deploy hook failed with return code %s" % return_code, return_code=return_code)

  def refresh(self):
    """Force a redeploy without changing the image by bumping deploymentTimestamp.

    Raises:
      HokusaiError: when any rollout fails.
    """
    deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
    for deployment in self.cache:
      patch = {
        "spec": {
          "template": {
            "metadata": {
              "labels": {"deploymentTimestamp": deployment_timestamp}
            }
          }
        }
      }
      print_green("Refreshing %s..." % deployment['metadata']['name'])
      shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))
    print_green("Waiting for refresh to complete...")
    rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
    return_code = shout_concurrent(rollout_commands)
    if return_code:
      raise HokusaiError("Refresh failed!", return_code=return_code)

  def history(self, deployment_name):
    """Return the deployment's ReplicaSets sorted by rollout revision."""
    replicasets = self.kctl.get_object('replicaset', selector="app=%s,layer=application" % config.project_name)
    replicasets = filter(lambda rs: rs['metadata']['ownerReferences'][0]['name'] == deployment_name, replicasets)
    return sorted(replicasets, key=lambda rs: int(rs['metadata']['annotations']['deployment.kubernetes.io/revision']))

  @property
  def names(self):
    """Names of the cached deployments."""
    return [deployment['metadata']['name'] for deployment in self.cache]

  @property
  def current_tag(self):
    """The single image tag currently referenced by all cached deployments.

    Raises:
      HokusaiError: when containers or deployments reference differing tags.
    """
    images = []
    for deployment in self.cache:
      containers = deployment['spec']['template']['spec']['containers']
      container_images = [container['image'] for container in containers]
      if not all(x == container_images[0] for x in container_images):
        # Fix: previously passed return_code=return_code where return_code
        # was undefined in this scope, raising NameError instead of HokusaiError
        raise HokusaiError("Deployment's containers do not reference the same image tag")
      images.append(containers[0]['image'])
    if not all(y == images[0] for y in images):
      # Fix: same undefined return_code bug as above
      raise HokusaiError("Deployments do not reference the same image tag")
    return images[0].rsplit(':', 1)[1]
class CommandRunner:
  """Runs a one-off command in the cluster as a bare pod via `kubectl run`."""

  def __init__(self, context, namespace=None):
    self.context = context
    self.kctl = Kubectl(self.context, namespace=namespace)
    self.ecr = ECR()

  def run(self, tag_or_digest, cmd, tty=None, env=(), constraint=()):
    """Launch `cmd` in a throwaway pod running the project image.

    Accepts either a tag or a digest (digests contain ':'). In TTY mode
    attaches interactively; otherwise returns the pod's exit code.
    """
    user = os.environ.get('USER')
    if user is not None:
      # The regex used for the validation of name is '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
      sanitized = re.sub("[^0-9a-z]+", "-", user.lower())
      uuid = "%s-%s" % (sanitized, k8s_uuid())
    else:
      uuid = k8s_uuid()
    name = "%s-hokusai-run-%s" % (config.project_name, uuid)
    # Digests (sha256:...) are addressed with '@', tags with ':'
    separator = "@" if ":" in tag_or_digest else ":"
    image_name = "%s%s%s" % (self.ecr.project_repo, separator, tag_or_digest)
    container = {
      "args": cmd.split(' '),
      "name": name,
      "image": image_name,
      "imagePullPolicy": "Always",
      'envFrom': [{'configMapRef': {'name': "%s-environment" % config.project_name}}]
    }
    run_tty = config.run_tty if tty is None else tty
    if run_tty:
      container["stdin"] = True
      container["stdinOnce"] = True
      container["tty"] = True
    if env:
      extra_env = []
      for pair in env:
        if '=' not in pair:
          raise HokusaiError("Error: environment variables must be of the form 'KEY=VALUE'")
        key, value = pair.split('=', 1)
        extra_env.append({'name': key, 'value': value})
      container['env'] = extra_env
    spec = {"containers": [container]}
    constraints = constraint or config.run_constraints
    if constraints:
      spec['nodeSelector'] = {}
      for label in constraints:
        if '=' not in label:
          raise HokusaiError("Error: Node selectors must of the form 'key=value'")
        key, value = label.split('=', 1)
        spec['nodeSelector'][key] = value
    overrides = {"apiVersion": "v1", "spec": spec}
    if run_tty:
      shout(self.kctl.command("run %s -t -i --image=%s --restart=Never --overrides=%s --rm" % (name, image_name, pipes.quote(json.dumps(overrides)))), print_output=True)
    else:
      return returncode(self.kctl.command("run %s --attach --image=%s --overrides=%s --restart=Never --rm" % (name, image_name, pipes.quote(json.dumps(overrides)))))
class Deployment(object):
    """Kubernetes deployment operations for one kubectl context.

    The constructor caches either a single named deployment object or every
    deployment labeled app=<project>,layer=application; update()/refresh()
    patch the cached deployments and wait on their rollouts.

    NOTE(review): this module appears to define `class Deployment` twice;
    a later definition shadows this one at import time — confirm intent.
    """

    def __init__(self, context, deployment_name=None, namespace=None):
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            # Operate on a single named deployment.
            self.cache = [
                self.kctl.get_object("deployment %s" % deployment_name)
            ]
        else:
            # Operate on every application-layer deployment of the project.
            self.cache = self.kctl.get_objects(
                'deployment',
                selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, timeout, resolve_tag_sha1=True):
        """Deploy image `tag` to every cached deployment.

        Flow: optionally resolve `tag` to its git-SHA1 image tag via ECR, run
        the pre-deploy hook (fatal on failure), patch each deployment's
        project-repo containers, wait on all rollouts (rolling every
        deployment back if any rollout fails), then run best-effort steps:
        post-deploy hook, ECR retags and Git deployment tags. Best-effort
        failures are reported and collected into a final HokusaiError without
        undoing the rollout.

        tag              -- image tag to deploy
        constraint       -- node selector constraints forwarded to hook runs
        git_remote       -- remote for deployment tags; falls back to
                            config.git_remote
        timeout          -- progressDeadlineSeconds applied to the rollout
        resolve_tag_sha1 -- when True, translate `tag` through ECR first

        Raises HokusaiError when the repo is missing, the tag cannot be
        resolved, the pre-deploy hook fails, a rollout fails, or any
        post-deploy step failed.
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        if resolve_tag_sha1:
            tag = self.ecr.find_git_sha1_image_tag(tag)
            if tag is None:
                # NOTE(review): `tag` was just overwritten with None, so this
                # message always reads "tag None" rather than naming the
                # requested tag — consider resolving into a separate variable.
                raise HokusaiError(
                    "Could not find a git SHA1 for tag %s. Aborting." % tag)

        if self.namespace is None:
            print_green("Deploying %s to %s..." % (tag, self.context),
                        newline_after=True)
        else:
            print_green("Deploying %s to %s/%s..."
                        % (tag, self.context, self.namespace),
                        newline_after=True)

        # Pre-deploy hook is fatal: a non-zero exit aborts the deploy.
        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy,
                        newline_after=True)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                                            tag,
                                            config.pre_deploy,
                                            constraint=constraint,
                                            tty=False)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

        # A fresh label value forces a new pod template hash on every deploy.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            containers = [(container['name'], container['image'])
                          for container in deployment['spec']['template']
                          ['spec']['containers']]
            # Only retarget containers whose image lives in the project repo;
            # sidecars with foreign images are left untouched.
            deployment_targets = [{
                "name": name,
                "image": "%s:%s" % (self.ecr.project_repo, tag)
            } for name, image in containers if self.ecr.project_repo in image]
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        },
                        "spec": {
                            "containers": deployment_targets
                        }
                    },
                    "progressDeadlineSeconds": timeout
                }
            }
            print_green("Patching deployment %s..."
                        % deployment['metadata']['name'], newline_after=True)
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))

        # Wait on every rollout concurrently; any failure rolls back ALL
        # cached deployments, not just the failed one.
        print_green("Waiting for deployment rollouts to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            print_red(
                "One or more deployment rollouts timed out!  Rolling back...",
                newline_before=True,
                newline_after=True)
            rollback_commands = [
                self.kctl.command("rollout undo deployment/%s" %
                                  deployment['metadata']['name'])
                for deployment in self.cache
            ]
            shout_concurrent(rollback_commands, print_output=True)
            raise HokusaiError("Deployment failed!")

        # Everything below is best-effort: failures set this flag and are
        # surfaced at the end rather than aborting immediately.
        post_deploy_success = True
        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s'..." % config.post_deploy,
                        newline_after=True)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                                            tag,
                                            config.post_deploy,
                                            constraint=constraint,
                                            tty=False)
            if return_code:
                print_yellow(
                    "WARNING: Running the post-deploy hook failed with return code %s"
                    % return_code,
                    newline_before=True,
                    newline_after=True)
                print_yellow(
                    "The tag %s has been rolled out.  However, you should run the post-deploy hook '%s' manually, or re-run this deployment."
                    % (tag, config.post_deploy),
                    newline_after=True)
                post_deploy_success = False

        # ECR/Git deployment tags are only maintained for the default
        # namespace deploys.
        if self.namespace is None:
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            print_green("Updating ECR deployment tags in %s..."
                        % self.ecr.project_repo, newline_after=True)
            try:
                self.ecr.retag(tag, self.context)
                print_green("Updated ECR tag %s -> %s" % (tag, self.context))
                self.ecr.retag(tag, deployment_tag)
                print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag),
                            newline_after=True)
            except (ValueError, ClientError) as e:
                print_yellow(
                    "WARNING: Updating ECR deployment tags failed due to the error: '%s'"
                    % str(e),
                    newline_before=True,
                    newline_after=True)
                print_yellow(
                    "The tag %s has been rolled out.  However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment."
                    % (tag, deployment_tag, self.context),
                    newline_after=True)
                post_deploy_success = False

            remote = git_remote or config.git_remote
            if remote is not None:
                print_green("Pushing Git deployment tags to %s..." % remote,
                            newline_after=True)
                try:
                    shout("git fetch %s" % remote)
                    # Pin both tags to the deployed commit, then push each
                    # ref explicitly (-f: move existing tags).
                    shout("git tag -f %s %s" % (self.context, tag),
                          print_output=True)
                    shout("git tag -f %s %s" % (deployment_tag, tag),
                          print_output=True)
                    shout("git push -f --no-verify %s refs/tags/%s"
                          % (remote, self.context), print_output=True)
                    print_green("Updated Git tag %s -> %s" % (tag, self.context))
                    shout("git push -f --no-verify %s refs/tags/%s"
                          % (remote, deployment_tag), print_output=True)
                    print_green("Updated Git tag %s -> %s"
                                % (tag, deployment_tag), newline_after=True)
                except CalledProcessError as e:
                    print_yellow(
                        "WARNING: Creating Git deployment tags failed due to the error: '%s'"
                        % str(e),
                        newline_before=True,
                        newline_after=True)
                    print_yellow(
                        "The tag %s has been rolled out.  However, you should create the Git tags '%s' and '%s' manually, or re-run this deployment."
                        % (tag, deployment_tag, self.context),
                        newline_after=True)
                    post_deploy_success = False

        if post_deploy_success:
            print_green("Deployment succeeded!")
        else:
            raise HokusaiError("One or more post-deploy steps failed!")

    def refresh(self):
        """Force a rolling restart by bumping the deploymentTimestamp label
        on each cached deployment, then wait on all rollouts.

        Raises HokusaiError if any rollout reports failure.
        """
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'],
                        newline_after=True)
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_codes = shout_concurrent(rollout_commands, print_output=True)
        if any(return_codes):
            raise HokusaiError("Refresh failed!")

    @property
    def names(self):
        # Names of all cached deployment objects.
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """Return the single image tag currently deployed across every cached
        deployment's project-repo containers.

        Raises HokusaiError if a deployment has no project-repo container, or
        if the containers/deployments disagree on the tag.
        """
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            # Only containers whose image lives in the project repo count.
            container_images = [
                container['image'] for container in containers
                if self.ecr.project_repo in container['image']
            ]
            if not container_images:
                raise HokusaiError(
                    "Deployment has no valid target containers. Aborting.")
            if not all(x == container_images[0] for x in container_images):
                raise HokusaiError(
                    "Deployment's containers do not reference the same image tag. Aborting."
                )
            images.append(container_images[0])
        if not all(y == images[0] for y in images):
            raise HokusaiError(
                "Deployments do not reference the same image tag. Aborting.")
        # Everything after the last ':' is the tag.
        return images[0].rsplit(':', 1)[1]
class Deployment(object):
    """Kubernetes deployment operations for one kubectl context.

    The constructor caches either a single named deployment object or every
    deployment labeled app=<project>,layer=application; update()/refresh()
    patch the cached deployments and wait on their rollouts.

    NOTE(review): this module appears to define `class Deployment` more than
    once; the later definition shadows the earlier at import time — confirm
    which variant is intended to be live.
    """

    def __init__(self, context, deployment_name=None, namespace=None):
        self.context = context
        self.namespace = namespace
        self.kctl = Kubectl(self.context, namespace=namespace)
        self.ecr = ECR()
        if deployment_name:
            # Operate on a single named deployment.
            self.cache = [
                self.kctl.get_object("deployment %s" % deployment_name)
            ]
        else:
            # Operate on every application-layer deployment of the project.
            self.cache = self.kctl.get_objects(
                'deployment',
                selector="app=%s,layer=application" % config.project_name)

    def update(self, tag, constraint, git_remote, resolve_tag_sha1=True):
        """Deploy image `tag` to every cached deployment.

        Flow: optionally resolve `tag` to its git-SHA1 image tag via ECR,
        retag in ECR and push Git deployment tags (default namespace only),
        run the pre-deploy hook, patch every container of every cached
        deployment to the new image, wait on all rollouts, then run the
        post-deploy hook. Hook or rollout failures raise HokusaiError.

        tag              -- image tag to deploy
        constraint       -- node selector constraints forwarded to hook runs
        git_remote       -- remote to push deployment tags to (may be None)
        resolve_tag_sha1 -- when True, translate `tag` through ECR first
        """
        if not self.ecr.project_repo_exists():
            raise HokusaiError("Project repo does not exist. Aborting.")
        if resolve_tag_sha1:
            # Bug fix: resolve into a separate variable so a failed lookup
            # reports the tag the caller asked for instead of "tag None".
            resolved_tag = self.ecr.find_git_sha1_image_tag(tag)
            if resolved_tag is None:
                raise HokusaiError(
                    "Could not find a git SHA1 for tag %s. Aborting." % tag)
            tag = resolved_tag

        if self.namespace is None:
            print_green("Deploying %s to %s..." % (tag, self.context))
        else:
            print_green("Deploying %s to %s/%s..."
                        % (tag, self.context, self.namespace))

        # Deployment tags are only maintained for default-namespace deploys.
        if self.namespace is None:
            self.ecr.retag(tag, self.context)
            print_green("Updated tag %s -> %s" % (tag, self.context))
            deployment_tag = "%s--%s" % (
                self.context,
                datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
            self.ecr.retag(tag, deployment_tag)
            print_green("Updated tag %s -> %s" % (tag, deployment_tag))
            if git_remote is not None:
                print_green("Pushing deployment tags to %s..." % git_remote)
                # NOTE(review): this tags the current HEAD rather than the
                # commit of `tag`, and pushes ALL local tags — consider
                # pinning the commit ("git tag -f %s %s") and pushing the
                # two refs explicitly.
                shout("git tag -f %s" % self.context, print_output=True)
                shout("git tag %s" % deployment_tag, print_output=True)
                shout("git push --force %s --tags" % git_remote,
                      print_output=True)

        # Pre-deploy hook is fatal: a non-zero exit aborts the deploy.
        if config.pre_deploy is not None:
            print_green("Running pre-deploy hook '%s'..." % config.pre_deploy)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                                            tag,
                                            config.pre_deploy,
                                            constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Pre-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

        # A fresh label value forces a new pod template hash on every deploy.
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_names = [container['name'] for container in containers]
            # Every container is retargeted to the project repo image.
            deployment_targets = [{
                "name": name,
                "image": "%s:%s" % (self.ecr.project_repo, tag)
            } for name in container_names]
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        },
                        "spec": {
                            "containers": deployment_targets
                        }
                    }
                }
            }
            print_green("Patching deployment %s..."
                        % deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))

        print_green("Waiting for rollout to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Deployment failed!", return_code=return_code)

        # Post-deploy hook runs after a successful rollout; failure is fatal
        # to this call but the rollout itself is not undone.
        if config.post_deploy is not None:
            print_green("Running post-deploy hook '%s'..." % config.post_deploy)
            return_code = CommandRunner(self.context,
                                        namespace=self.namespace).run(
                                            tag,
                                            config.post_deploy,
                                            constraint=constraint)
            if return_code:
                raise HokusaiError(
                    "Post-deploy hook failed with return code %s" % return_code,
                    return_code=return_code)

    def refresh(self):
        """Force a rolling restart by bumping the deploymentTimestamp label
        on each cached deployment, then wait on all rollouts.

        Raises HokusaiError if any rollout reports failure.
        """
        deployment_timestamp = datetime.datetime.utcnow().strftime("%s%f")
        for deployment in self.cache:
            patch = {
                "spec": {
                    "template": {
                        "metadata": {
                            "labels": {
                                "deploymentTimestamp": deployment_timestamp
                            }
                        }
                    }
                }
            }
            print_green("Refreshing %s..." % deployment['metadata']['name'])
            shout(
                self.kctl.command(
                    "patch deployment %s -p '%s'" %
                    (deployment['metadata']['name'], json.dumps(patch))))
        print_green("Waiting for refresh to complete...")
        rollout_commands = [
            self.kctl.command("rollout status deployment/%s" %
                              deployment['metadata']['name'])
            for deployment in self.cache
        ]
        return_code = shout_concurrent(rollout_commands)
        if return_code:
            raise HokusaiError("Refresh failed!", return_code=return_code)

    @property
    def names(self):
        # Names of all cached deployment objects.
        return [deployment['metadata']['name'] for deployment in self.cache]

    @property
    def current_tag(self):
        """Return the single image tag currently deployed across every cached
        deployment's containers.

        Raises HokusaiError if the containers/deployments disagree on the
        image tag.
        """
        images = []
        for deployment in self.cache:
            containers = deployment['spec']['template']['spec']['containers']
            container_images = [container['image'] for container in containers]
            if not all(x == container_images[0] for x in container_images):
                raise HokusaiError(
                    "Deployment's containers do not reference the same image tag. Aborting."
                )
            images.append(container_images[0])
        if not all(y == images[0] for y in images):
            raise HokusaiError(
                "Deployments do not reference the same image tag. Aborting.")
        # Everything after the last ':' is the tag.
        return images[0].rsplit(':', 1)[1]