def verify_sanity(ctx, namespace):
    """Run post-deployment verification playbook against the orc8r deployment.

    Locates a kubeconfig (from $KUBECONFIG or the project directory),
    prompts until a valid namespace is chosen, then runs the verification
    playbook. Exits the process with status 1 on verification failure.

    Args:
        ctx: click context; ctx.obj holds the config constants dict
        namespace: candidate orc8r namespace to verify
    """
    constants = ctx.obj
    # check if KUBECONFIG is set else find kubeconfig file and set the
    # environment variable
    kubeconfig = os.environ.get('KUBECONFIG')
    if not kubeconfig:
        kubeconfigs = glob.glob(constants['project_dir'] + "/kubeconfig_*")
        if len(kubeconfigs) != 1:
            # bug fix: zero kubeconfigs is a failure, not a success —
            # report it through the error channel like the multiple case
            if len(kubeconfigs) == 0:
                print_error_msg('No kubeconfig found!!!')
            else:
                print_error_msg(
                    "multiple kubeconfigs found %s!!!" % repr(kubeconfigs))
            return
        kubeconfig = kubeconfigs[0]
        set_kubeconfig_environ(kubeconfig)

    # check if we have a valid namespace; re-prompt until one is chosen
    all_namespaces = get_all_namespaces(kubeconfig)
    while namespace not in all_namespaces:
        namespace = click.prompt(
            'Provide orc8r namespace', type=click.Choice(all_namespaces))

    rc = run_playbook(verify_cmd(ctx.obj, namespace))
    if rc != 0:
        print_error_msg("Post deployment verification checks failed!!!")
        sys.exit(1)
    print_success_msg("Post deployment verification ran successfully")
def raw_cleanup(
        constants: dict, override_dict: dict = None,
        dryrun: bool = False, max_retries: int = 2):
    """Perform raw cleanup of resources using internal commands.

    When no override and no prior cleanup state exist, falls back to the
    terraform state backup file (if present) as the cleanup input.

    Args:
        constants (dict): config dict; may be mutated in place with
            'cleanup_state' and any override entries
        override_dict (dict): override entries merged into constants.
            Defaults to None.
        dryrun (bool): flag to indicate dryrun. Defaults to False.
        max_retries (int): number of cleanup passes to run
    """
    # seed cleanup_state from the terraform backup only when the caller
    # supplied no overrides and no state is already recorded
    if not override_dict and not constants.get('cleanup_state'):
        backup_fn = tf_backup_fn(constants['project_dir'])
        if Path(backup_fn).exists():
            constants['cleanup_state'] = backup_fn
    if override_dict:
        constants.update(override_dict)
    # sometimes cleanups might not fully happen due to timing related
    # resource dependencies. Run it few times to eliminate all resources
    # completely
    for i in range(max_retries):
        rc = run_playbook(cleanup_cmd(constants, dryrun))
        if rc != 0:
            print_error_msg("Failed cleaning up resources!!!")
def set(self, component: str, key: str, value: str):
    """Set a configuration attribute for a component.

    Args:
        component: component whose configuration is being updated
        key: attribute name; must exist in the component's config vars
        value: raw value to store
    """
    click.echo(f"Setting key {key} value {value} "
               f"for component {component}")
    config_vars = self.config_vars[component]
    # bug fix: use membership, not truthiness of the looked-up entry —
    # a valid key whose config metadata is falsy (e.g. {}) was rejected
    if key not in config_vars:
        print_error_msg(f"{key} not a valid attribute in {component}")
        return
    self.configs[component][key] = value
def precheck(ctx):
    """Performs various checks to ensure successful upgrade.

    Exits the process with status 1 when the precheck playbook fails.
    """
    if run_playbook(precheck_cmd(ctx.obj)) != 0:
        print_error_msg("Upgrade prechecks failed!!!")
        sys.exit(1)
    print_success_msg("Upgrade prechecks ran successfully")
def set(self, component: str, key: str, value: str):
    """Set a configuration attribute for a component, coercing the value.

    The raw string value is converted to the attribute's declared type
    (via input_to_type) before being stored.

    Args:
        component: component whose configuration is being updated
        key: attribute name; must exist in the component's config vars
        value: raw string value to convert and store
    """
    click.echo(f"Setting key {key} value {value} "
               f"for component {component}")
    config_vars = self.config_vars[component]
    config_info = config_vars.get(key)
    # bug fix: distinguish "key absent" (None) from a falsy-but-present
    # config entry, which is still a valid attribute
    if config_info is None:
        print_error_msg(f"{key} not a valid attribute in {component}")
        return
    self.configs[component][key] = input_to_type(
        value, config_info.get('Type'))
def set_aws_configs(params: set, configs: dict):
    """Sets AWS configuration

    Args:
        params (set): List of aws configuration attributes
        configs (dict): Configuration map for a particular component
    """
    # walk the config map in its own order, applying only whitelisted keys
    for attr, attr_value in configs.items():
        if attr not in params:
            continue
        result = run_command(["aws", "configure", "set", attr, attr_value])
        if result.returncode != 0:
            print_error_msg(f"Failed configuring aws with {attr}")
def check(self, component: str) -> bool:
    """Check that every mandatory option of a component is configured.

    Returns:
        bool: True when all required attributes are set, False otherwise.
    """
    component_cfgs = self.configs[component]
    # collect required attributes that are still unset
    missing_cfgs = [
        name
        for name, info in self.config_vars[component].items()
        if info['Required'] and component_cfgs.get(name) is None
    ]
    if missing_cfgs:
        print_error_msg(
            f"Missing {missing_cfgs!r} configs for {component} component")
        return False
    print_success_msg(
        f"All mandatory configs for {component} has been configured")
    return True
def setup_aws_environ():
    """Set up aws configuration attributes in environment"""
    session = Session()
    credentials = session.get_credentials()
    region = session.region_name
    # bail out when either credentials or a region is unavailable
    if not credentials or not region:
        print_error_msg('''
    AWS credentials not configured. configure through awscli or through orcl
    orcl configure set -k aws_access_key_id -v <access_key_id>
    orcl configure set -k aws_secret_access_key -v <aws_secret_access_key>
    orcl configure set -k region -v <region>
    ''')
        sys.exit(1)
    # export frozen credentials so subprocesses (terraform, aws cli) see them
    frozen = credentials.get_frozen_credentials()
    os.environ["AWS_ACCESS_KEY_ID"] = frozen.access_key
    os.environ["AWS_SECRET_ACCESS_KEY"] = frozen.secret_key
    os.environ["AWS_REGION"] = region
def upgrade(ctx):
    """Upgrade existing orc8r deployment.

    Optionally runs prechecks, then walks through the terraform
    init/refresh/apply sequence, confirming each command with the user.
    Stops at the first failed terraform command.
    """
    tf_cmds = [
        ["terraform", "init", "--upgrade"],
        ["terraform", "refresh"],
        ["terraform", "apply", "-auto-approve"]]
    if ctx.invoked_subcommand is None:
        if click.confirm('Do you want to run upgrade prechecks?'):
            ctx.invoke(precheck)
        else:
            # fix: plain string — the f-prefix had no placeholders (F541)
            print_warning_msg("Skipping upgrade prechecks")
        click.echo("Following commands will be run during upgrade\n%s" % (
            "\n".join(map(" ".join, tf_cmds))))
        for cmd in tf_cmds:
            if click.confirm(
                    'Do you want to continue with %s?' % " ".join(cmd)):
                rc = execute_command(cmd)
                if rc != 0:
                    # abort the remaining terraform steps on first failure
                    print_error_msg("Upgrade Failed!!!")
                    return
def tf_install(constants: dict, warn: bool = True, max_retries: int = 2) -> int:
    """Run through terraform installation

    Args:
        constants (dict): config dict
        warn (bool, optional): require user confirmation. Defaults to True.
        max_retries (int): Number of times to retry in case of a failure.

    Returns:
        int: return code (0 on success, 1 on failure)
    """
    tf_init = ["terraform", "init"]
    tf_orc8r = ["terraform", "apply", "-target=module.orc8r", "-auto-approve"]
    tf_secrets = [
        "terraform", "apply",
        "-target=module.orc8r-app.null_resource.orc8r_seed_secrets",
        "-auto-approve"]
    tf_orc8r_app = ["terraform", "apply", "-auto-approve"]
    for tf_cmd in [tf_init, tf_orc8r, tf_secrets, tf_orc8r_app]:
        cmd = " ".join(tf_cmd)
        if warn and not click.confirm(f'Do you want to continue with {cmd}?'):
            print_warning_msg(f"Skipping Command {cmd}")
            continue
        for i in range(max_retries):
            # terraform fails randomly due to timeouts
            click.echo(f"Running {tf_cmd}, iteration {i}")
            rc = execute_command(tf_cmd, cwd=constants['project_dir'])
            if rc == 0:
                break
            print_error_msg(f"Install failed when running {cmd} !!!")
            if i == (max_retries - 1):
                # fix: plain string — the f-prefix had no placeholders (F541)
                print_error_msg("Max retries exceeded!!!")
                return 1
        # set the kubectl after bringing up the infra
        if tf_cmd in (tf_orc8r, tf_orc8r_app):
            kubeconfigs = glob.glob(
                constants['project_dir'] + "/kubeconfig_*")
            if len(kubeconfigs) != 1:
                print_error_msg(
                    "zero or multiple kubeconfigs found %s!!!" % repr(
                        kubeconfigs))
                # bug fix: honor the declared int return type — a bare
                # `return` leaked None to callers checking `rc != 0`
                return 1
            kubeconfig = kubeconfigs[0]
            os.environ['KUBECONFIG'] = kubeconfig
            print_info_msg(
                'For accessing kubernetes cluster, set'
                f' `export KUBECONFIG={kubeconfig}`')
        print_success_msg(f"Command {cmd} ran successfully")
    return 0
def tf_destroy(
    constants: dict,
    warn: bool = True,
    max_retries: int = 2,
) -> int:
    """Run through terraform cleanup

    Args:
        constants (dict): Config definitions
        warn (bool): require user confirmation. Defaults to True.
        max_retries (int): Number of times to retry in case of a failure.

    Returns:
        int: Return code
    """
    if warn and not click.confirm(
        'Do you want to continue with cleanup?',
        abort=True,
    ):
        # NOTE(review): abort=True raises click.Abort on "no", so this
        # return is defensive and effectively unreachable
        return 0

    # snapshot the current terraform state so raw cleanup can reuse it
    project_dir = constants['project_dir']
    try:
        copyfile(tf_state_fn(project_dir), tf_backup_fn(project_dir))
    except OSError:
        print_error_msg('Unable to backup terraform state')
        return 1

    destroy_cmd = ["terraform", "destroy", "-auto-approve"]
    cmd = " ".join(destroy_cmd)
    for attempt in range(max_retries):
        click.echo(f"Running {cmd}, iteration {attempt}")
        if execute_command(destroy_cmd, cwd=project_dir) == 0:
            return 0
        print_error_msg("Destroy Failed!!!")
        if attempt == (max_retries - 1):
            print_error_msg(
                "Max retries exceeded!!! Attempt cleaning up using"
                " 'orcl cleanup raw' subcommand",
            )
            return 1
    return 0