def k8s_update(context, namespace=None, filename=None, check_branch="master", check_remote=None, skip_checks=False, dry_run=False):
  """Apply the rendered Kubernetes spec for `context` with `kubectl apply`.

  Unless `skip_checks` is set, verifies that the working copy is on
  `check_branch` and not divergent from its remote(s) before applying.
  When `dry_run` is set, passes `--dry-run` to kubectl.

  Raises HokusaiError if the branch checks fail.
  """
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    yaml_template = TemplateSelector().get(filename)

  if not skip_checks:
    current_branch = None
    for branchname in shout('git branch').splitlines():
      if '* ' in branchname:
        current_branch = branchname.replace('* ', '')
        break

    # Fix: `current_branch` can still be None here (e.g. a repository with
    # no commits yields no '* ' line), in which case the original
    # `'detached' in current_branch` raised a TypeError instead of the
    # intended HokusaiError.
    if current_branch is None or 'detached' in current_branch:
      raise HokusaiError("Not on any branch! Aborting.")
    if current_branch != check_branch:
      raise HokusaiError("Not on %s branch! Aborting." % check_branch)

    # Compare against a single explicit remote, or all configured remotes.
    remotes = [check_remote] if check_remote else shout('git remote').splitlines()
    for remote in remotes:
      shout("git fetch %s" % remote)
      # `git diff --quiet` exits non-zero when the trees differ.
      if returncode("git diff --quiet %s/%s" % (remote, current_branch)):
        raise HokusaiError("Local branch %s is divergent from %s/%s. Aborting." % (current_branch, remote, current_branch))

  kctl = Kubectl(context, namespace=namespace)
  yaml_spec = YamlSpec(yaml_template).to_file()
  if dry_run:
    shout(kctl.command("apply -f %s --dry-run" % yaml_spec), print_output=True)
    print_green("Updated Kubernetes environment %s (dry run)" % yaml_template)
  else:
    shout(kctl.command("apply -f %s" % yaml_spec), print_output=True)
    print_green("Updated Kubernetes environment %s" % yaml_template)
def k8s_status(context, resources, pods, describe, top, namespace=None, filename=None):
  """Print status for the Kubernetes environment: spec resources, app pods, and/or `kubectl top` output."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    template = TemplateSelector().get(filename)
  kctl = Kubectl(context, namespace=namespace)
  spec_file = YamlSpec(template).to_file()

  # 'describe' gives verbose per-resource detail; otherwise 'get -o wide'
  # prints a summary table.
  verb, out_fmt = ("describe", "") if describe else ("get", " -o wide")

  if resources:
    print_green("Resources", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s -f %s%s" % (verb, spec_file, out_fmt)), print_output=True)
  if pods:
    print_green("Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("%s pods --selector app=%s,layer=application%s" % (verb, config.project_name, out_fmt)), print_output=True)
  if top:
    print_green("Top Pods", newline_before=True)
    print_green("===========")
    shout(kctl.command("top pods --selector app=%s,layer=application" % config.project_name), print_output=True)
def k8s_create(context, tag='latest', namespace=None, filename=None, environment=()):
  """Create the Kubernetes environment for `context` from the rendered spec.

  Verifies the ECR repo and image tag exist, optionally retags 'latest'
  to the context name, creates the project configmap (canonical spec only),
  then runs `kubectl create --save-config`.
  """
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    template = TemplateSelector().get(filename)

  registry = ECR()
  if not registry.project_repo_exists():
    raise HokusaiError("ECR repository %s does not exist... did you run `hokusai setup` for this project?" % config.project_name)
  if not registry.tag_exists(tag):
    raise HokusaiError("Image tag %s does not exist... did you run `hokusai registry push`?" % tag)
  # Seed the context tag from 'latest' the first time an environment is created.
  if tag == 'latest' and not registry.tag_exists(context):
    registry.retag(tag, context)
    print_green("Updated tag 'latest' -> %s" % context)

  # The configmap is only managed for the canonical (context-named) spec.
  if filename is None:
    configmap = ConfigMap(context, namespace=namespace)
    for pair in environment:
      if '=' not in pair:
        raise HokusaiError("Error: environment variables must be of the form 'KEY=VALUE'")
      key, _, value = pair.partition('=')
      configmap.update(key, value)
    configmap.create()
    print_green("Created configmap %s-environment" % config.project_name)

  kctl = Kubectl(context, namespace=namespace)
  spec_file = YamlSpec(template).to_file()
  shout(kctl.command("create --save-config -f %s" % spec_file), print_output=True)
  print_green("Created Kubernetes environment %s" % template)
def test_finds_explicit_file_or_errors(self):
  """An explicitly named template must exist on disk; once present, the selector returns that exact path."""
  explicit_path = os.path.join(self.template_path, 'test.yml')
  with self.assertRaises(HokusaiError):
    TemplateSelector().get(explicit_path)
  open(explicit_path, 'a').close()
  self.assertEqual(TemplateSelector().get(explicit_path), explicit_path)
  os.remove(explicit_path)
def dev_start(build, detach, filename):
  """Bring up the development docker-compose environment.

  Optionally rebuilds images first, registers signal handlers so Ctrl+C
  stops the containers, and runs `docker-compose up` (detached if requested).
  """
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
  else:
    yaml_template = TemplateSelector().get(filename)
  docker_compose_yml = YamlSpec(yaml_template).to_file()
  follow_extends(docker_compose_yml)

  def cleanup(*args):
    # Stop the compose project on exit signals so Ctrl+C leaves a clean state.
    shout("docker-compose -f %s -p hokusai stop" % docker_compose_yml, print_output=True)

  for sig in EXIT_SIGNALS:
    signal.signal(sig, cleanup)

  if build:
    Docker().build(filename=yaml_template)

  opts = ' -d' if detach else ''
  if not detach:
    print_green("Starting development environment... Press Ctrl+C to stop.")
  shout("docker-compose -f %s -p hokusai up%s" % (docker_compose_yml, opts), print_output=True)
  if detach:
    # Fix: corrected the command-name typo 'hokousai' -> 'hokusai' in the hint.
    print_green("Run `hokusai dev stop` to shut down, or `hokusai dev logs --follow` to tail output.")
def dev_status(filename):
  """Show `docker-compose ps` output for the development environment."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
  else:
    template = TemplateSelector().get(filename)
  compose_file = YamlSpec(template).to_file()
  follow_extends(compose_file)
  shout("docker-compose -f %s -p hokusai ps" % compose_file, print_output=True)
def k8s_delete(context, namespace=None, filename=None):
  """Tear down the Kubernetes environment described by the context (or explicit filename) spec."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
  else:
    template = TemplateSelector().get(filename)

  # The project configmap is only managed for the canonical (context-named) spec.
  if filename is None:
    ConfigMap(context, namespace=namespace).destroy()
    print_green("Deleted configmap %s-environment" % config.project_name)

  kctl = Kubectl(context, namespace=namespace)
  spec_file = YamlSpec(template).to_file()
  shout(kctl.command("delete -f %s" % spec_file), print_output=True)
  print_green("Deleted Kubernetes environment %s" % template)
def build(self, filename=None):
  """Run the project's docker-compose build, wrapped by any configured pre/post build hooks."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, BUILD_YAML_FILE))
  else:
    template = TemplateSelector().get(filename)
  compose_file = YamlSpec(template).to_file()

  # Chain optional pre/post build hooks around the compose build with '&&'
  # so a failing hook aborts the whole pipeline.
  pipeline = ["docker-compose -f %s -p hokusai build" % compose_file]
  if config.pre_build:
    pipeline.insert(0, config.pre_build)
  if config.post_build:
    pipeline.append(config.post_build)
  shout(" && ".join(pipeline), print_output=True)
def test(build, cleanup, filename, service_name):
  """Run the test suite via docker-compose and return the main container's exit code.

  Raises HokusaiError if compose fails or the container exits non-zero.
  When `cleanup` is set, containers are stopped and removed on exit/failure.
  """
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, TEST_YML_FILE))
  else:
    template = TemplateSelector().get(filename)
  compose_file = YamlSpec(template).to_file()
  follow_extends(compose_file)

  def teardown(*args):
    # Stop and remove the test containers.
    shout("docker-compose -f %s -p hokusai stop" % compose_file)
    shout("docker-compose -f %s -p hokusai rm --force" % compose_file)

  if cleanup:
    for sig in EXIT_SIGNALS:
      signal.signal(sig, teardown)

  opts = ' --abort-on-container-exit'
  if build:
    Docker().build()
  if service_name is None:
    service_name = config.project_name

  print_green("Starting test environment... Press Ctrl+C to stop.", newline_after=True)
  try:
    shout("docker-compose -f %s -p hokusai up%s" % (compose_file, opts), print_output=True)
    # `docker wait` blocks until the container exits and prints its exit code.
    exit_status = int(shout("docker wait hokusai_%s_1" % service_name))
  except CalledProcessError:
    if cleanup:
      teardown()
    raise HokusaiError('Tests Failed')

  if exit_status:
    raise HokusaiError('Tests Failed - Exit Code: %s\n' % exit_status, return_code=exit_status)
  print_green("Tests Passed")
  if cleanup:
    teardown()
  return exit_status
def dev_logs(follow, tail, filename):
  """Show `docker-compose logs` for the development environment, optionally following/tailing."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
  else:
    template = TemplateSelector().get(filename)
  compose_file = YamlSpec(template).to_file()
  follow_extends(compose_file)

  flags = []
  if follow:
    flags.append('--follow')
  if tail:
    flags.append("--tail=%i" % tail)
  opts = ''.join(' ' + flag for flag in flags)
  shout("docker-compose -f %s -p hokusai logs%s" % (compose_file, opts), print_output=True)
def dev_run(command, service_name, stop, filename):
  """Run a one-off command in a development compose service (defaults to the project's service)."""
  if filename is None:
    template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
  else:
    template = TemplateSelector().get(filename)
  compose_file = YamlSpec(template).to_file()
  follow_extends(compose_file)

  target_service = config.project_name if service_name is None else service_name
  shout("docker-compose -f %s -p hokusai run %s %s" % (compose_file, target_service, command), print_output=True)
  if stop:
    shout("docker-compose -f %s -p hokusai stop" % compose_file, print_output=True)
def follow_extends(docker_compose_yml):
  """Render any templates referenced by compose services via `extends: file:`.

  Scans the services in `docker_compose_yml`; for each service extending
  another file, resolves that file (falling back to a `.j2` variant) under
  the hokusai config dir and renders it, so docker-compose can find it.

  Returns the list of rendered template paths.
  """
  with open(docker_compose_yml, 'r') as f:
    rendered_templates = []
    struct = yaml.safe_load(f.read())
    # Fix: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; the key (service name) was unused, so iterate values().
    for service_spec in struct['services'].values():
      if 'extends' not in service_spec or 'file' not in service_spec['extends']:
        continue
      extended_filename = service_spec['extends']['file']
      extended_template_path = os.path.join(CWD, HOKUSAI_CONFIG_DIR, extended_filename)
      # Fall back to the Jinja2 template variant when the plain file is absent.
      if not os.path.isfile(extended_template_path):
        extended_template_path = os.path.join(CWD, HOKUSAI_CONFIG_DIR, extended_filename + '.j2')
      extended_template = TemplateSelector().get(extended_template_path)
      rendered_templates.append(YamlSpec(extended_template).to_file())
    return rendered_templates
def check():
  """Verify local tooling, AWS access, and hokusai project files.

  Prints a green check or red cross per item and returns the number of
  failed checks (0 means everything was found).
  """
  return_code = 0

  def check_ok(check_item):
    print_green('\u2714 ' + check_item + ' found')

  def check_err(check_item):
    print_red('\u2718 ' + check_item + ' not found')

  # Project config: reading the property raises HokusaiError when unset.
  # NOTE(review): unlike every other check, a failure here does not bump
  # return_code — confirm whether that is intentional.
  try:
    config.project_name
    check_ok('Config project-name')
  except HokusaiError:
    check_err('Config project-name')

  # Required CLI tools on PATH ('which' exits non-zero when missing).
  try:
    shout('which docker')
    check_ok('docker')
  except CalledProcessError:
    check_err('docker')
    return_code += 1
  try:
    shout('which docker-compose')
    check_ok('docker-compose')
  except CalledProcessError:
    check_err('docker-compose')
    return_code += 1
  try:
    shout('which kubectl')
    check_ok('kubectl')
  except CalledProcessError:
    check_err('kubectl')
    return_code += 1
  try:
    shout('which git')
    check_ok('git')
  except CalledProcessError:
    check_err('git')
    return_code += 1

  # AWS credentials: STS get-caller-identity fails fast on bad or missing creds.
  try:
    boto3.client('sts', region_name=get_region_name()).get_caller_identity()
    check_ok('Valid AWS credentials')
  except (botoexceptions.ClientError, botoexceptions.NoCredentialsError):
    check_err('Valid AWS credentials')
    return_code += 1

  # ECR repository for this project.
  ecr = ECR()
  if ecr.project_repo_exists():
    check_ok("ECR repository '%s'" % config.project_name)
  else:
    check_err("ECR repository '%s'" % config.project_name)
    return_code += 1

  # Template files under the hokusai config dir (any supported extension).
  try:
    build_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, BUILD_YAML_FILE))
    check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(build_template)[-1])
  except HokusaiError:
    check_err('hokusai/build.*')
    return_code += 1
  try:
    development_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, DEVELOPMENT_YML_FILE))
    check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(development_template)[-1])
  except HokusaiError:
    check_err('hokusai/development.*')
    return_code += 1
  try:
    test_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, TEST_YML_FILE))
    check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(test_template)[-1])
  except HokusaiError:
    check_err('hokusai/test.*')
    return_code += 1

  # Per-environment kubectl context and template spec.
  for context in ['staging', 'production']:
    try:
      # NOTE(review): contexts() is queried via Kubectl('staging') for both
      # environments — presumably it lists all kubeconfig contexts regardless
      # of the constructor argument; confirm against Kubectl.contexts().
      if context in Kubectl('staging').contexts():
        check_ok("kubectl context '%s'" % context)
      else:
        check_err("kubectl context '%s'" % context)
        return_code += 1
    except CalledProcessError:
      check_err('%s context' % context)
      return_code += 1
    try:
      context_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, context))
      check_ok(HOKUSAI_CONFIG_DIR + '/' + os.path.split(context_template)[-1])
    except HokusaiError:
      check_err("hokusai/%s.*" % context)
      return_code += 1
  return return_code
def test_errors_with_no_template_found(self):
  """No 'test.*' template exists under template_path, so the lookup must raise."""
  missing_template = os.path.join(self.template_path, 'test')
  with self.assertRaises(HokusaiError):
    TemplateSelector().get(missing_template)
def test_finds_yaml_j2_file(self):
  """A bare template name resolves to its '.yaml.j2' variant when that file exists."""
  j2_path = os.path.join(self.template_path, 'test.yaml.j2')
  open(j2_path, 'a').close()
  resolved = TemplateSelector().get(os.path.join(self.template_path, 'test'))
  self.assertEqual(resolved, j2_path)
  os.remove(j2_path)
def update(self, tag, constraint, git_remote, timeout, update_config=False, filename=None):
  """Roll out the image tagged `tag` to this deployment's context/namespace.

  Resolves the tag to an immutable digest, runs the configured pre-deploy
  hook, patches or re-applies the Deployment specs, watches the rollouts
  (rolling back on failure), runs the post-deploy hook, and — for the
  canonical app — updates ECR and Git deployment tags.

  Raises HokusaiError when the repo/digest is missing, the pre-deploy hook
  fails, a rollout fails, or any post-deploy step fails.
  """
  # Resolve the mutable tag to an immutable image digest; the deploy pins
  # the digest so later retags cannot change what was rolled out.
  if not self.ecr.project_repo_exists():
    raise HokusaiError("Project repo does not exist. Aborting.")
  digest = self.ecr.image_digest_for_tag(tag)
  if digest is None:
    raise HokusaiError("Could not find an image digest for tag %s. Aborting." % tag)
  # Second half of the digest, used below as the 'app.kubernetes.io/version'
  # label value (shorter than the full digest).
  digest_half = digest[len(digest) // 2:]

  if self.namespace is None:
    print_green("Deploying %s to %s..." % (digest, self.context), newline_after=True)
  else:
    print_green("Deploying %s to %s/%s..." % (digest, self.context, self.namespace), newline_after=True)

  """ This logic should be refactored, but essentially if namespace and
  filename are provided, the caller is a review app, while if namespace is
  None it is either staging or production. If filename is unset for staging
  or production it is targeting the 'canonical' app, i.e. staging.yml or
  production.yml while if it is set it is targeting a 'canary' app.
  For the canonical app, run deploy hooks and post-deploy steps creating
  deployment tags.
  For a canary app, skip deploy hooks and post-deploy steps.
  For review apps, run deploy hooks but skip post-deploy steps.
  For all deployment rollouts, if update_config or filename targets a yml
  file, bust the deployment cache using k8s field selectors and get
  deployments to watch the rollout from the yml file spec """

  # Run the pre-deploy hook for the canonical app or a review app
  if config.pre_deploy and (filename is None or (filename and self.namespace)):
    print_green("Running pre-deploy hook '%s'..." % config.pre_deploy, newline_after=True)
    return_code = CommandRunner(self.context, namespace=self.namespace).run(digest, config.pre_deploy, constraint=constraint, tty=False)
    if return_code:
      raise HokusaiError("Pre-deploy hook failed with return code %s" % return_code, return_code=return_code)

  # Load the spec: canonical apps use the context-named template, canary and
  # review apps use the explicit filename.
  if filename is None:
    yaml_template = TemplateSelector().get(os.path.join(CWD, HOKUSAI_CONFIG_DIR, self.context))
  else:
    yaml_template = TemplateSelector().get(filename)
  yaml_spec = YamlSpec(yaml_template).to_list()

  # If a review app, a canary app or the canonical app while updating config,
  # bust the deployment cache and populate deployments from the yaml file
  if filename or update_config:
    self.cache = []
    for item in yaml_spec:
      if item['kind'] == 'Deployment':
        self.cache.append(item)

  # If updating config, patch the spec and apply
  if update_config:
    print_green("Patching Deployments in spec %s with image digest %s" % (yaml_template, digest), newline_after=True)
    payload = []
    for item in yaml_spec:
      if item['kind'] == 'Deployment':
        # Pin the rollout deadline, version labels, and image digest on each
        # Deployment; other resource kinds pass through unchanged.
        item['spec']['progressDeadlineSeconds'] = timeout
        item['metadata']['labels']['app.kubernetes.io/version'] = digest_half
        item['spec']['template']['metadata']['labels']['app.kubernetes.io/version'] = digest_half
        for container in item['spec']['template']['spec']['containers']:
          # Only containers running this project's image are re-pinned.
          if self.ecr.project_repo in container['image']:
            container['image'] = "%s@%s" % (self.ecr.project_repo, digest)
      payload.append(item)
    # Write the patched spec to a temp file, apply it, then clean up.
    f = NamedTemporaryFile(delete=False, dir=HOKUSAI_TMP_DIR, mode='w')
    f.write(YAML_HEADER)
    f.write(yaml.safe_dump_all(payload, default_flow_style=False))
    f.close()
    print_green("Applying patched spec %s..." % f.name, newline_after=True)
    try:
      shout(self.kctl.command("apply -f %s" % f.name), print_output=True)
    finally:
      os.unlink(f.name)
  # If not updating config, patch the deployments in the cache and call kubectl patch to update
  else:
    for deployment in self.cache:
      containers = [(container['name'], container['image']) for container in deployment['spec']['template']['spec']['containers']]
      # Strategic-merge patch targets: only this project's containers get the
      # new digest.
      deployment_targets = [{'name': name, 'image': "%s@%s" % (self.ecr.project_repo, digest)} for name, image in containers if self.ecr.project_repo in image]
      patch = {
        "metadata": {
          "labels": {
            "app.kubernetes.io/version": digest_half
          }
        },
        "spec": {
          "template": {
            "metadata": {
              "labels": {
                "app.kubernetes.io/version": digest_half
              }
            },
            "spec": {
              "containers": deployment_targets
            }
          },
          "progressDeadlineSeconds": timeout
        }
      }
      print_green("Patching deployment %s..." % deployment['metadata']['name'], newline_after=True)
      shout(self.kctl.command("patch deployment %s -p '%s'" % (deployment['metadata']['name'], json.dumps(patch))))

  # Watch the rollouts in the cache and if any fail, roll back
  print_green("Waiting for deployment rollouts to complete...")
  rollout_commands = [self.kctl.command("rollout status deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
  return_codes = shout_concurrent(rollout_commands, print_output=True)
  if any(return_codes):
    print_red("One or more deployment rollouts failed! Rolling back...", newline_before=True, newline_after=True)
    rollback_commands = [self.kctl.command("rollout undo deployment/%s" % deployment['metadata']['name']) for deployment in self.cache]
    shout_concurrent(rollback_commands, print_output=True)
    raise HokusaiError("Deployment failed!")

  # Post-deploy steps below are best-effort: failures are reported and
  # flip this flag rather than interrupting the flow.
  post_deploy_success = True

  # Run the post-deploy hook for the canonical app or a review app
  if config.post_deploy and (filename is None or (filename and self.namespace)):
    print_green("Running post-deploy hook '%s'..." % config.post_deploy, newline_after=True)
    return_code = CommandRunner(self.context, namespace=self.namespace).run(digest, config.post_deploy, constraint=constraint, tty=False)
    if return_code:
      print_yellow("WARNING: Running the post-deploy hook failed with return code %s" % return_code, newline_before=True, newline_after=True)
      print_yellow("The image digest %s has been rolled out. However, you should run the post-deploy hook '%s' manually, or re-run this deployment." % (digest, config.post_deploy), newline_after=True)
      post_deploy_success = False

  # For the canonical app, create tags
  if filename is None:
    # Timestamped tag recording this specific rollout, e.g. 'staging--2020-01-01--12-00-00'.
    deployment_tag = "%s--%s" % (self.context, datetime.datetime.utcnow().strftime("%Y-%m-%d--%H-%M-%S"))
    print_green("Updating ECR deployment tags in %s..." % self.ecr.project_repo, newline_after=True)
    try:
      self.ecr.retag(tag, self.context)
      print_green("Updated ECR tag %s -> %s" % (tag, self.context))
      self.ecr.retag(tag, deployment_tag)
      print_green("Updated ECR tag %s -> %s" % (tag, deployment_tag), newline_after=True)
    except (ValueError, ClientError) as e:
      print_yellow("WARNING: Updating ECR deployment tags failed due to the error: '%s'" % str(e), newline_before=True, newline_after=True)
      print_yellow("The tag %s has been rolled out. However, you should create the ECR tags '%s' and '%s' manually, or re-run this deployment." % (tag, deployment_tag, self.context), newline_after=True)
      post_deploy_success = False

    remote = git_remote or config.git_remote
    if remote:
      # Update git tags. Try up to 3 times, at 3 second intervals. Failure does not fail deployment.
      git_tag_sucess = False
      attempts = 0
      while ((not git_tag_sucess) and (attempts < 3)):
        try:
          attempts += 1
          print_green("Creating Git deployment tags '%s', '%s', and pushing them to %s..." % (self.context, deployment_tag, remote))
          print_green("Attempt# %s." % attempts)
          shout("git fetch -f %s --tags" % remote)
          shout("git tag -f %s %s" % (self.context, tag), print_output=True)
          shout("git tag -f %s %s" % (deployment_tag, tag), print_output=True)
          shout("git push -f --no-verify %s refs/tags/%s" % (remote, self.context), print_output=True)
          print_green("Updated Git tag %s -> %s" % (tag, self.context))
          shout("git push -f --no-verify %s refs/tags/%s" % (remote, deployment_tag), print_output=True)
          print_green("Updated Git tag %s -> %s" % (tag, deployment_tag), newline_after=True)
          git_tag_sucess = True
        except CalledProcessError as e:
          # If subprocess.check_output() was called, the actual error is in CalledProcessError's 'output' attribute.
          print_yellow("WARNING: Creating Git deployment tags failed due to the error:", newline_before=True, newline_after=True)
          print_yellow(e.output)
          time.sleep(3)
      if (not git_tag_sucess):
        print_yellow("Failed all attempts at pushing Git deployment tags! Please do it manually.", newline_after=True)

  if post_deploy_success:
    print_green("Deployment succeeded!")
  else:
    raise HokusaiError("One or more post-deploy steps failed!")