def __init__(self, version: str, version_family: str):
    """Initialise all attributes.

    :param version: bwctl version string
    :param version_family: bwctl version family string
    """
    # Initialize versions
    self.version: str = version
    self.version_family: str = version_family
    # File names
    self.dir: str = os.path.expanduser("~/.bwctl/")
    self.file: str = os.path.join(self.dir, 'config')
    # Ensure configuration directory exists; exist_ok replaces the
    # try/except FileExistsError dance
    os.makedirs(self.dir, exist_ok=True)
    # Initialise configuration
    self.config: Dict = {}
    try:
        with open(self.file, 'r') as config_f:
            # safe_load() returns None for an empty file; fall back to {}
            # so self.config is always a dict. The context manager closes
            # the file - no explicit close() needed.
            self.config = yaml.safe_load(config_f) or {}
    except FileNotFoundError:
        log_info("No configuration file found, starting clean")
        self.config = {}
def check_gcp(self) -> bool:
    """Check GCP credentials are provided.

    Validates the 'gcp' section: the keyfile parameter must point to a
    readable JSON key file, and the project id parameter must be set.

    :return: True when all GCP credentials are in place, False otherwise
    """
    credentials_missing: bool = False
    # Check 'google_cloud_keyfile_json'
    get_cloud: Result = self.get_cloud('gcp')
    if get_cloud.status:
        param: Dict = self.params_gcp[0]
        get_param: Result = self.get_gcp_param(param['key'])
        if not get_param.status:
            log_warn("{!r} is missing".format(param['key']))
            credentials_missing = True
        else:
            try:
                # Context manager closes the file; the old explicit
                # close() inside the with-block was redundant
                with open(os.path.expanduser(get_param.value),
                          'r') as cred_f:
                    self.credentials['gcp'] = json.load(cred_f)
                    self.set_gcp_param(param['key'], get_param.value)
            except FileNotFoundError:
                log_info("No GCP credentials file found: {!r}".format(
                    get_param.value))
                credentials_missing = True
        # Check 'project_id'
        param = self.params_gcp[1]
        get_param = self.get_gcp_param(param['key'])
        if not get_param.status:
            log_warn("{!r} is missing".format(param['key']))
            credentials_missing = True
    else:
        log_warn("'gcp' section is missing")
        credentials_missing = True
    if credentials_missing:
        return False
    log_info("GCP credentials - OK")
    return True
def init_credentials_template(self) -> bool:
    """Copy the bundled credentials template if none exists yet.

    :return: True (always; copying is best-effort bootstrap)
    """
    template_src = os.path.join(
        os.path.dirname(bwctl.templates.__file__), 'credentials.yml')
    if not os.path.exists(self.credentials_template_file):
        log_info("No credentials file template found, copying clean...")
        shutil.copyfile(template_src, self.credentials_template_file)
    return True
def leave_fabric(ctx):
    """Leave current fabric"""
    current = ctx.obj.state.get_current_fabric()
    # Nothing to leave when no fabric is selected
    if not current:
        log_error('Cannot leave. Please select fabric first.')
        sys.exit(1)
    log_info('Leaving fabric {0!r}'.format(current))
    return ctx.obj.set_current_fabric(None)
def ansible_process_output(self, line):
    """Filter ansible output"""
    cleaned = line.strip().strip('* ')
    if self.debug:
        # Debug mode: echo every non-empty line
        if line.strip():
            log_info(cleaned)
    elif re.match("PLAY|TASK|RUNNING", line):
        # Normal mode: only show play/task/runner headers
        log_info(cleaned)
def delete_vpc(ctx, vpc_name, dry_run):
    """Delete VPC.

    Refuses to delete while any node still references the VPC; otherwise
    removes the VPC from a copy of the state, runs terraform, and only then
    replaces the live state.

    :param ctx: click context carrying the bwctl state object
    :param vpc_name: name of the VPC to delete
    :param dry_run: when True, only report what would be deleted
    :return: True on success; exits the process with non-zero status on error
    """
    obj_kind = ObjectKind.VPC.value
    obj_name = vpc_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete VPC. Please select fabric first.')
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting VPC {0!r}...'.format(obj_name))
    # Check if VPC exists
    if not ctx.obj.state.check_vpc(ctx.obj.state.get_current_fabric(),
                                   obj_name):
        log_error("Cannot delete. VPC {0!r} doesn't exist".format(obj_name))
        sys.exit(1)
    # Check if there are nodes in VPC: collect nodes of every kind whose
    # 'vpc' field references this VPC
    node_list = []
    for node_kind in [ObjectKind.ORCHESTRATOR.value,
                      ObjectKind.PROCESSOR.value, ObjectKind.WORKLOAD.value]:
        node_list = node_list + [
            x for x in ctx.obj.state.get_fabric_objects(node_kind).items()
            if obj_name in x[1][obj_kind]
        ]
    if node_list:
        log_error("Cannot delete. VPC {0!r} contains nodes, delete them first:".format(obj_name))
        for node in node_list:
            log_error("{0!r}".format(node[0]))
        sys.exit(1)
    # Delete VPC
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.upper(), obj_name))
        return True
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete VPC. Not able to get credentials')
        sys.exit(1)
    # Actual VPC deletion: operate on a copy of the state so the live state
    # is only replaced after terraform succeeds
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind.upper(), obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete VPC. There is issue with terraform plan generation')
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete VPC. There is issue with terraform plan execution')
        sys.exit(terraform_result[1])
    # Delete VPCs batches
    temp_state.nodebatch_delete(vpc=obj_name)
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.upper(), obj_name))
    return True
def inventory_generate(self, node_list=None, node_vars=None,
                       per_node_vars=None):
    """Generate ansible inventory.

    :param node_list: nodes to include; defaults to every orchestrator,
        processor and workload in the current fabric
    :param node_vars: variables applied to every node
    :param per_node_vars: mapping of node name -> extra variables for that
        node only
    :return: True on success, False when the inventory dump fails
    """
    state_fabric = self.state['fabric'][self.fabric]
    if node_vars is None:
        node_vars = []
    if node_list is None:
        # No explicit list: include every node of every kind
        node_list = []
        for obj_kind in [
                ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value,
                ObjectKind.WORKLOAD.value
        ]:
            node_list = node_list + list(state_fabric[obj_kind])
    if per_node_vars is None:
        per_node_vars = {}
    log_info("Generate ansible inventory...")
    self.add_inventory_group('all')
    for node in node_list:
        # Copy so per-node additions don't leak into other nodes
        inventory_node_vars = node_vars[:]
        if node in per_node_vars:
            inventory_node_vars += per_node_vars[node]
        # Resolve the node's kind and FQDN (workload is the fallback kind)
        node_kind = ObjectKind.WORKLOAD.value
        node_fqdn = ''
        for kind in [
                ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value,
                ObjectKind.WORKLOAD.value
        ]:
            if node in state_fabric[kind]:
                node_kind = kind
                node_fqdn = state_fabric[node_kind][node]['properties'][
                    'fqdn']
        # Register the host with its IP, FQDN and the region of its VPC
        self.add_inventory_host(
            node, state_fabric[node_kind][node]['properties']['ip'],
            node_fqdn,
            state_fabric['vpc'][state_fabric[node_kind][node]
                               ['vpc']]['region'], inventory_node_vars)
        self.add_group_host(node, 'all')
        self.add_inventory_group(node_kind)
        self.add_group_host(node, node_kind)
        # Group by cloud provider of the node's VPC
        for node_cloud in ['aws', 'gcp', 'azr']:
            if node_cloud in state_fabric['vpc'][state_fabric[node_kind]
                                                 [node]['vpc']]['cloud']:
                self.add_inventory_group(node_cloud)
                self.add_group_host(node, node_cloud)
    # Orchestrator role groups (controller/telemetry/events); groups are
    # created even for nodes outside node_list, but hosts are only added
    # for listed nodes
    for node in state_fabric['orchestrator']:
        for orch_type in ['controller', 'telemetry', 'events']:
            if orch_type in state_fabric['orchestrator'][node]['type']:
                self.add_inventory_group(orch_type)
                if node in node_list:
                    self.add_group_host(node, orch_type)
    dst_filename = os.path.join(self.config_dir, 'ansible_inventory')
    if not self.inventory_dump(self.inventory, dst_filename):
        return False
    return True
def request_validate_str(value, regexp=None):
    """Validate answer to match regexp"""
    # Default pattern: letters, digits and dashes only
    pattern = re.compile(regexp if regexp is not None
                         else "^[a-zA-Z0-9-]+$")
    if pattern.match(value):
        return True, value
    log_info(
        "Please respond with value that match pattern {!r}".format(
            pattern.pattern))
    return False, None
def parse_file(self, path: str) -> bool:
    """Get credentials from file.

    :param path: path to the YAML credentials file ('~' is expanded)
    :return: True when parsed successfully, False when the file is absent
    """
    log_info("Reading credentials file: {!r}".format(path))
    try:
        with open(os.path.expanduser(path), 'r') as cred_f:
            # safe_load() returns None for an empty file; keep
            # self.credentials a dict in that case. The context manager
            # closes the file - the old explicit close() was redundant.
            self.credentials = yaml.safe_load(cred_f) or {}
    except FileNotFoundError:
        log_warn("No credentials file found: {!r}".format(path))
        return False
    return True
def request_config_parameter_bool(message, default=False):
    """Requires answer with true/false.

    :param message: prompt text shown to the user
    :param default: value returned on empty input
    :return: the boolean the user selected
    """
    prompt = " [{}]: ".format(str(default).lower())
    while True:
        log_info(message + prompt, nl=False)
        choice = input().lower()
        if choice == "":
            return default
        # Validate once and reuse the result instead of calling the
        # validator twice per loop iteration
        valid, value = request_validate_bool(choice)
        if valid:
            return value
def confirm_yes_no(message, default="no"):
    """Requires confirmation with yes/no.

    :param message: prompt text shown to the user
    :param default: answer assumed on empty input ("yes" or "no")
    :return: True for yes, False for no
    """
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    # Capitalise the default answer in the prompt; previously the prompt
    # was hardcoded to " [y/N] ", which was misleading when default="yes"
    prompt = " [Y/n] " if valid[default] else " [y/N] "
    while True:
        log_info(message + prompt)
        choice = input().lower()
        if choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            log_info("Please respond with 'yes' or 'no'")
def delete_workload(ctx, workload_name, dry_run):
    """Delete workload.

    Removes the workload from a copy of the state, runs terraform, and only
    then replaces the live state; regenerates the SSH config afterwards.

    :param ctx: click context carrying the bwctl state object
    :param workload_name: name of the workload to delete
    :param dry_run: when True, only report what would be deleted
    :return: True on success; exits the process with non-zero status on error
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_name = workload_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(),
                                        obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion: operate on a copy of the state so the live state is
    # only replaced after terraform succeeds
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration
    # NOTE(review): "Generating SSH config..." is logged after generation
    # already succeeded - confirm the message ordering is intended
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def get_aws(self) -> bool:
    """Get S3 state credentials for the state repository.

    Tries the globally configured credentials file first, then falls back
    to environment variables.

    :return: True when AWS credentials were obtained, False otherwise
    """
    temp_credentials = Credentials(self.fabric, self.state, self.config)
    log_info('Check if credentials are set for S3 repository')
    # Parse global credentials if configured
    no_credentials_file: bool = False
    credentials_file: str = self.config.get_attr('credentials_file')
    # Reuse the already-fetched value instead of calling get_attr() twice
    if credentials_file:
        log_info('Global credentials file {!r} is configured'.format(
            credentials_file))
        if not temp_credentials.parse_file(credentials_file):
            return False
    else:
        log_info('Global credentials file is not configured')
        no_credentials_file = True
    # Check credentials from file
    if not no_credentials_file:
        log_info('Checking global credentials for {!r}'.format('aws'))
        if temp_credentials.check_aws():
            self.credentials['aws'] = deepcopy(
                temp_credentials.credentials['aws'])
            return True
    # Check environment variables
    if temp_credentials.get_env_aws():
        self.credentials['aws'] = deepcopy(
            temp_credentials.credentials['aws'])
        return True
    return False
def bwctl(ctx, version): """Bayware CLI""" # Print version if version: ctx.obj.do_version() sys.exit(0) # Init credentials template if not ctx.obj.init_credentials_template(): log_info("Exiting...") sys.exit(1) # Run REPL if no command has been passed if ctx.invoked_subcommand is None: ctx.obj.set_cli_prefix() ctx.invoke(repl)
def delete_fabric(ctx, fabric_name): """Delete fabric""" # Check if naming matches rules fabric_name = ctx.obj.state.normalise_state_obj_name('fabric', fabric_name) log_info('Deleting fabric {0!r}...'.format(fabric_name)) # Check if fabric exists if not ctx.obj.state.check_fabric(fabric_name): log_error("Cannot delete. Fabric {0!r} doesn't exist".format(fabric_name)) sys.exit(1) # Check for existing VPCs vpc_list = ctx.obj.state.get_fabric_objects('vpc', fabric_name).items() if vpc_list: log_error("Cannot delete. Fabric {0!r} contains VPCs, delete them first:".format(fabric_name)) for vpc in vpc_list: log_error("{0!r}".format(vpc[0])) sys.exit(1) # Check for existing nodes node_list = [] for obj_kind in [ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value, ObjectKind.WORKLOAD.value]: node_list = node_list + list(ctx.obj.state.get_fabric_objects(obj_kind, fabric_name)) if node_list: log_error("Cannot delete. Fabric {0!r} contains nodes, delete them first:".format(fabric_name)) for node in node_list: log_error("{0!r}".format(node[0])) sys.exit(1) # Delete fabric ansible_playbook = "delete-fabric.yml" ansible = Ansible(ctx.obj.state, fabric_name, ctx.obj.state.get_ssh_key()) log_info("Delete {0!r} fabric's files".format(fabric_name)) ansible_result = ansible.run_playbook(ansible_playbook, local=True) if not ansible_result[0]: log_error('Cannot delete fabric. There is issue with ansible playbook execution') sys.exit(ansible_result[1]) # If this is current fabric, unset it if ctx.obj.state.get_current_fabric() == fabric_name: ctx.obj.set_current_fabric(None) # Delete from state res = ctx.obj.state.delete_fabric(fabric_name) if not res.status: log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric_name, res.value)) sys.exit(1) # Delete fabrics batches ctx.obj.state.nodebatch_delete(fabric=fabric_name) # Success log_ok('Fabric {0!r} deleted successfully'.format(fabric_name)) ctx.obj.state.dump() return True
def get_env_gcp(self) -> bool:
    """Get GCP credentials from env"""
    missing = False
    log_info("Checking environment variables: {}".format(
        [p['env'] for p in self.params_gcp]))
    for param in self.params_gcp:
        value: str = os.getenv(param['env'], '')
        if value:
            self.set_gcp_param(param['key'], value)
        else:
            log_warn("{!r} is not set".format(param['env']))
            missing = True
    if missing:
        return False
    log_info("GCP credentials - OK")
    return True
def check_azr(self) -> bool:
    """Check AZR credentials are provided"""
    get_cloud: Result = self.get_cloud('azr')
    # Missing section means missing credentials straight away
    credentials_missing = not get_cloud.status
    if get_cloud.status:
        for param in self.params_azr:
            if not self.get_azr_param(param['key']).status:
                log_warn("{!r} is missing".format(param['key']))
                credentials_missing = True
    else:
        log_warn("'azr' section is missing")
    if credentials_missing:
        return False
    log_info("AZR credentials - OK")
    return True
def request_config_parameter_str(message, default=None, regexp=None):
    """Requires answer with string value.

    :param message: prompt text shown to the user
    :param default: value returned on empty input; None means an answer is
        required and the prompt repeats
    :param regexp: validation pattern passed to request_validate_str()
    :return: the validated (lower-cased) answer, or the default
    """
    if default is None:
        prompt = " (value is required): "
    else:
        prompt = " [{}]: ".format(default)
    while True:
        log_info(message + prompt, nl=False)
        choice = input().lower()
        if choice == "":
            # Empty input: re-ask when no default, otherwise accept default
            if default is None:
                continue
            return default
        # Validate once and reuse the result instead of calling the
        # validator twice per loop iteration
        valid, value = request_validate_str(choice, regexp)
        if valid:
            return value
def export_fabric(ctx, filename, output_format):
    """Export fabric specification to file"""
    current_fabric = ctx.obj.state.get_current_fabric()
    # A fabric must be selected before anything can be exported
    if not current_fabric:
        log_error("Please set fabric before exporting")
        sys.exit(1)
    log_info('Exporting to {0!r}'.format(filename))
    export_spec = ExportSpec(fabric=ctx.obj.state.get_fabric(current_fabric),
                             fabric_name=current_fabric,
                             export_format=output_format,
                             out_file=filename,
                             api_version=ctx.obj.state.api_version)
    if not export_spec.generate_spec():
        log_error('Error exporting fabric configuration')
        sys.exit(1)
    log_ok('Fabric configuration exported successfully')
    return True
def get_output_variable(self, module, variable):
    """Get output variable from terraform plan.

    :param module: terraform module name to query
    :param variable: output variable name
    :return: the stripped variable value, or None when empty/on error
    """
    cwd = os.getcwd()
    os.chdir(self.terraform_dir)
    try:
        cmd = sh.terraform("output", "-module=" + module, variable)
    except sh.ErrorReturnCode as err:
        log_info(err.full_cmd)
        log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
        log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
        log_error("Unexpected terraform error during output (status code {0!r})".format(err.exit_code))
        return None
    finally:
        # Always restore the working directory - previously an unexpected
        # exception type would leave the process chdir'd into terraform_dir
        os.chdir(cwd)
    output = cmd.strip()
    return output if output else None
def plan_generate(self):
    """Generate terraform plan.

    Renders every terraform template for the current fabric into the
    terraform working directory, showing progress as it goes.

    :return: True when all templates were rendered and dumped, False on
        the first failure
    """
    log_info("Generate terraform plan...")

    def show_item(item):
        """Print current item"""
        if item is not None:
            return '-> {0!s}'.format(item)

    with click.progressbar(self.TERRAFORM_TEMPLATES,
                           item_show_func=show_item) as bar:
        for template in bar:
            dst_filename = os.path.join(self.terraform_dir, template)
            # Render '<template>.j2' with the fabric context and write the
            # result next to the template name; abort on first failure
            if not self.plan_dump(
                    generate_from_template(
                        self.state, self.fabric, self.hosted_zone,
                        self.pub_key, self.config.get_attr('cloud_storage'),
                        self.config.get_attr('fabric_manager'),
                        self.username, self.aws_ec2_role,
                        template + '.j2'), dst_filename):
                return False
    return True
def show_fabric(ctx, list_all, name):
    """Show fabric information.

    :param ctx: click context carrying the bwctl state object
    :param list_all: when True, only list all known fabrics
    :param name: explicit fabric name to show; defaults to the current one
    :return: True on success; exits the process on error
    """
    fabric_key_order = [
        'state', 'status', 'config', 'vpc', 'orchestrator', 'processor',
        'workload'
    ]
    current_fabric = ctx.obj.state.get_current_fabric()

    def _list_fabrics():
        """Print the fabric table, marking the current fabric with '*'."""
        click.echo('{:1s} {:15s}'.format(' ', 'FABRIC'))
        for fabric_key in ctx.obj.state.get().get('fabric'):
            click.echo('{0:^1s} {1:15s}'.format(
                '*' if fabric_key == current_fabric else ' ', fabric_key))

    # If list_all - just list and end
    if list_all:
        _list_fabrics()
        return True
    # Check if fabric is set
    if name is not None:
        # Check existence
        if not ctx.obj.state.check_fabric(name):
            log_error("Cannot show. Fabric {0!r} doesn't exist".format(name))
            sys.exit(1)
        fabric_name = name
    else:
        if not current_fabric:
            log_info(
                'Available fabrics listed. Use “bwctl set fabric FABRIC_NAME” to select fabric.'
            )
            _list_fabrics()
            sys.exit(1)
        fabric_name = current_fabric
    # List dict in defined order
    fabric_state = OrderedDict()
    for key in fabric_key_order:
        if key in ctx.obj.state.get_fabric(fabric_name):
            fabric_state[key] = ctx.obj.state.get_fabric(fabric_name)[key]
    # BUG FIX: the header previously printed current_fabric, which is wrong
    # (or None) when an explicit NAME was requested - print the fabric
    # actually being shown
    click.echo('{0!s}:'.format(fabric_name))
    click.echo(json.dumps(fabric_state, indent=2))
    return True
def __init__(self, batch, api_version):
    """Initialise all attributes.

    :param batch: parsed batch document (mapping)
    :param api_version: bwctl API version string
    :raises TypeError: when the document is not a batch
    """
    self.batch = batch
    # Is it a batch? Use .get() so a document without a 'kind' key raises
    # the intended TypeError instead of an unexpected KeyError
    kind = self.batch.get('kind')
    if not (bool(kind) and kind.lower() == ObjectKind.BATCH.value):
        raise TypeError("This is not a batch")
    self.bwctl_api_version = api_version
    self.batch_api_version = self.batch.get('apiVersion')
    self.batch_metadata = self.batch.get('metadata')
    self.batch_spec = self.batch.get('spec')
    # Group spec entries by their lower-cased kind
    self.spec = dict()
    for item in self.batch_spec:
        self.spec.setdefault(item['kind'].lower(), []).append(item)
    log_info("Found batch {0!r} ({1!s}) with {2!s} objects".format(
        self.batch_metadata['name'], self.batch_metadata['description'],
        len(self.batch_spec)))
def check_aws(self) -> bool:
    """Check AWS credentials are provided"""
    get_cloud: Result = self.get_cloud('aws')
    # Guard clause: without an 'aws' section there is nothing to check
    if not get_cloud.status:
        log_warn("'aws' section is missing")
        return False
    role_param: Result = self.get_aws_param('aws_ec2_role')
    if role_param.status and role_param.value:
        # EC2 role configured - explicit keys are not required
        log_info("AWS credentials - OK (EC2 role is configured)")
        return True
    log_warn("{!r} is set to 'false'".format('aws_ec2_role'))
    credentials_missing = False
    for param in self.params_aws:
        if not self.get_aws_param(param['key']).status:
            log_warn("{!r} is missing".format(param['key']))
            credentials_missing = True
    if credentials_missing:
        return False
    log_info("AWS credentials - OK")
    return True
def get_output_variable_with_retries(self, module, variable, retries):
    """Get output variable from terraform plan (with retries).

    Between attempts the terraform state is refreshed so late-appearing
    outputs can be picked up on a subsequent read.

    :param module: terraform module name to query
    :param variable: output variable name
    :param retries: maximum number of refresh-and-retry cycles
    :return: the output value, or None if still absent after all retries
    """
    terraform_refresh = [
        "refresh", "-input=false", "-var",
        "gcp_credentials=" + self.gcp_credentials, "-var",
        "gcp_project_name=" + self.gcp_project_name, "-var",
        "azr_client_id=" + self.azr_client_id, "-var",
        "azr_client_secret=" + self.azr_client_secret, "-var",
        "azr_resource_group_name=" + self.azr_resource_group_name, "-var",
        "azr_subscription_id=" + self.azr_subscription_id, "-var",
        "azr_tennant_id=" + self.azr_tennant_id, "-var",
        "image_tag=" + self.image_tag, "-var",
        "image_version=" + self.image_family.replace(".", "-"), "-var",
        "dns_managed_zone_domain=" + self.hosted_zone,
        "-target=module." + module + ".azurerm_virtual_machine.vm"
    ]
    # Check if there AWS keys or EC2 role should be used
    if not self.aws_ec2_role:
        terraform_refresh.append("-var")
        terraform_refresh.append("aws_secret_key={}".format(self.s3_secret_key))
        terraform_refresh.append("-var")
        terraform_refresh.append("aws_access_key={}".format(self.s3_access_key_id))
    log_info("Getting {0!r} output for {1!r} from terraform state...".format(variable, module))
    # First attempt before any refresh
    var = self.get_output_variable(module, variable)
    i = 1
    cwd = os.getcwd()
    os.chdir(self.terraform_dir)
    while var is None and i <= retries:
        # NOTE(review): 'terraforn' typo is in the runtime log string;
        # left untouched here as this is a documentation-only pass
        log_warn('Retry to get {0!r} output from terraforn state. ({1} of {2})'.format(variable, i, retries))
        log_info('Refresh terraform module...')
        try:
            if not self.config.get_debug():
                # Quiet mode: run refresh silently
                sh.terraform(terraform_refresh)
            else:
                # Debug mode: stream refresh output line by line
                cmd = sh.terraform(terraform_refresh,
                                   _out=self.terraform_process_output,
                                   _bg=True)
                cmd.wait()
        except sh.ErrorReturnCode as err:
            log_info(err.full_cmd)
            log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
            log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
            log_error("Unexpected terraform error during refresh (status code {0!r})".format(err.exit_code))
        # Re-read the output after the refresh attempt
        var = self.get_output_variable(module, variable)
        i = i + 1
    os.chdir(cwd)
    return var
def run_playbook(self, playbook_file_name, tag_list=None, local=None):
    """Run ansible playbook.

    :param playbook_file_name: playbook file to execute
    :param tag_list: optional list of ansible tags to limit the run
    :param local: when set, run against localhost with the fabric
        extra-vars; when None, run against the generated inventory script
    :return: (True, 0) on success, (False, exit_code) on failure
    """
    if not tag_list:
        tag_list = []
    log_info(
        "Running ansible playbook {0!r}...".format(playbook_file_name))
    # Prepare env
    cwd = os.getcwd()
    os.chdir(self.ansible_dir)
    os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
    os.environ['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
    os.environ['ANSIBLE_SSH_RETRIES'] = '5'
    ansible_parameters = [
        "-i", "localhost,", "-c", "local", "-u", os.getenv('USER', ''),
        "--extra-vars", "env_fabric_name=" + self.fabric,
        "--extra-vars",
        "env_customer_company_name=" + self.customer_company_name,
        # BUG FIX: '=' was missing here ("env_hosted_zone" + value), which
        # produced a malformed extra-var instead of a key=value assignment
        "--extra-vars", "env_hosted_zone=" + self.hosted_zone,
        playbook_file_name
    ]
    # Check if not local execution
    if local is None:
        ansible_parameters = [
            "-i", "ansible_inventory.sh", playbook_file_name
        ]
    # Add tags if provided (trailing comma is accepted by ansible)
    if tag_list:
        ansible_parameters.append("--tags")
        ansible_parameters.append(",".join(tag_list) + ",")
    # Run ansible playbook
    try:
        cmd = sh.ansible_playbook(ansible_parameters,
                                  _out=self.ansible_process_output,
                                  _bg=True)
        cmd.wait()
    except sh.ErrorReturnCode as err:
        log_info(err.full_cmd)
        log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
        log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
        log_error(
            "Unexpected ansible playbook error (status code {0!r})".format(
                err.exit_code))
        if os.path.exists(cwd):
            os.chdir(cwd)
        return False, err.exit_code
    if os.path.exists(cwd):
        os.chdir(cwd)
    return True, 0
def check_ssh(path: str) -> bool:
    """Checks if SSH key files are exist"""
    private_file_name: str = os.path.expanduser(path)
    public_file_name: str = private_file_name + '.pub'
    log_info("Checking SSH key files:")
    key_failure: bool = False
    # Private half of the key pair
    if os.path.isfile(private_file_name):
        log_info("Private key file exists - OK")
    else:
        log_warn("Private key file is not found: {!r}".format(
            private_file_name))
        key_failure = True
    # Public half of the key pair
    if os.path.isfile(public_file_name):
        log_info("Public key file exists - OK")
    else:
        log_warn(
            "Public key file is not found: {!r}".format(public_file_name))
        key_failure = True
    if not key_failure:
        return True
    log_info(
        "NOTE: If private SSH key is provided as fabric configuration, "
        "the public part is also expected to exist")
    return False
def terraform_process_output(line):
    """Print terraform output"""
    stripped = line.strip()
    # Skip whitespace-only lines
    if stripped:
        log_info(stripped)
def plan_execute(self):
    """Execute terraform plan.

    Runs 'terraform init' followed by 'terraform apply' in the terraform
    working directory, passing cloud credentials and fabric settings as
    -var arguments; refuses to run when another terraform process is
    already active.

    :return: (True, 0) on success, (False, exit_code) on failure
    """
    terraform_steps = {
        'init': ["init", "-input=false", "-force-copy"],
        'apply': [
            "apply", "-input=false", "-var",
            "gcp_credentials=" + self.gcp_credentials, "-var",
            "gcp_project_name=" + self.gcp_project_name, "-var",
            "azr_client_id=" + self.azr_client_id, "-var",
            "azr_client_secret=" + self.azr_client_secret, "-var",
            "azr_resource_group_name=" + self.azr_resource_group_name,
            "-var", "azr_subscription_id=" + self.azr_subscription_id,
            "-var", "azr_tennant_id=" + self.azr_tennant_id, "-var",
            "production=" + self.production, "-var",
            "bastion_ip=" + self.config.get_attr('fabric_manager')['ip'],
            "-var", "image_tag=" + self.image_tag, "-var",
            "image_version=" + self.image_family.replace(".", "-"), "-var",
            "dns_managed_zone_domain=" + self.hosted_zone, "-auto-approve"
        ]
    }
    # Check if there AWS keys or EC2 role should be used
    if not self.aws_ec2_role:
        terraform_steps['init'].append("-backend-config=secret_key={}".format(self.s3_secret_key))
        terraform_steps['init'].append("-backend-config=access_key={}".format(self.s3_access_key_id))
        terraform_steps['apply'].append("-var")
        terraform_steps['apply'].append("aws_secret_key={}".format(self.s3_secret_key))
        terraform_steps['apply'].append("-var")
        terraform_steps['apply'].append("aws_access_key={}".format(self.s3_access_key_id))
    cwd = os.getcwd()
    log_info("Running terraform init and apply...")
    os.chdir(self.terraform_dir)
    # Check if there is any terraform already running
    proc_name = 'terraform'
    proc_result = check_process_running(proc_name)
    if proc_result[0]:
        # NOTE(review): this early return leaves the process chdir'd into
        # terraform_dir (cwd not restored) - confirm callers tolerate this
        log_error("There is already {!r} process (PID {!r}) running for user {!r}. Please retry again "
                  "later...".format(proc_name, str(proc_result[1]), proc_result[2]))
        return False, 1

    def show_step(item):
        """Print current step"""
        # We need to return next step as progressbar prints previously completed step
        if item is not None:
            t_keys = list(terraform_steps.keys())
            idx = t_keys.index(item)
            if idx == len(t_keys) - 1:
                return '-> {0}'.format(item)
            else:
                return '-> {0}'.format(t_keys[idx + 1])

    if not self.config.get_debug():
        # Quiet mode: show a progress bar, suppress terraform output
        with click.progressbar(terraform_steps, item_show_func=show_step,
                               show_eta=False) as bar:
            for step in bar:
                try:
                    sh.terraform(terraform_steps[step])
                except sh.ErrorReturnCode as err:
                    log_info(err.full_cmd)
                    log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
                    log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
                    log_error("Unexpected terraform error during {0!s} (status code {1!r})".format(step, err.exit_code))
                    os.chdir(cwd)
                    return False, err.exit_code
    else:
        # Debug mode: stream terraform output line by line
        for step in terraform_steps:
            try:
                cmd = sh.terraform(terraform_steps[step],
                                   _out=self.terraform_process_output,
                                   _bg=True)
                cmd.wait()
            except sh.ErrorReturnCode as err:
                log_info(err.full_cmd)
                log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
                log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
                log_error("Unexpected terraform error during {0!s} (status code {1!r})".format(step, err.exit_code))
                os.chdir(cwd)
                return False, err.exit_code
    os.chdir(cwd)
    return True, 0
def start_workload(ctx, workload_name, dry_run, all_nodes):
    """Start workload.

    Starts either one named workload or (with --all) every workload that is
    in a startable state.

    :param ctx: click context carrying the bwctl state object
    :param workload_name: workload to start (mutually exclusive with --all)
    :param dry_run: when True, only report what would be started
    :param all_nodes: when True, start all eligible workloads
    :return: True on success; exits the process with non-zero status on error
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_list, obj_name = None, None
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error(
            'Cannot start {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check target objects to be used
    if all_nodes:
        if workload_name:
            log_error(
                "Cannot start. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        # Every workload that is neither being deleted nor freshly created
        obj_list = [
            x[0] for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
            if not ctx.obj.state.check_object_state(x[1], ObjectState.DELETING)
            and not ctx.obj.state.check_object_state(x[1], ObjectState.CREATED)
        ]
        if not obj_list:
            log_error(
                "Cannot start. There are no configured {}s".format(obj_kind))
            sys.exit(1)
    elif workload_name:
        # Check if naming matches rules
        obj_name = ctx.obj.state.normalise_state_obj_name(
            obj_kind, workload_name)
        # Check if exist
        if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(),
                                            obj_name):
            log_error("Cannot start. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = ctx.obj.state.get_fabric_object(obj_kind, obj_name)
        if ctx.obj.state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        # Already started successfully: nothing to do
        if ctx.obj.state.check_object_state(obj, ObjectState.STARTED) and \
                ctx.obj.state.check_object_status(obj, ObjectStatus.SUCCESS):
            log_warn("{0} {1!r} is already started".format(
                obj_kind.title(), obj_name))
            return True
        # Only configured or stopped workloads may be started
        if not ctx.obj.state.check_object_state(
                obj, [ObjectState.CONFIGURED, ObjectState.STOPPED]):
            log_error("Cannot start. {0} {1!r} should be configured".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        obj_list = [workload_name]
    elif workload_name is None:
        log_error(
            "Cannot start. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Start dry-run
    if dry_run:
        # NOTE(review): with --all, obj_name is still None here, so this
        # message prints None - consider reporting obj_list instead
        log_warn('{0} {1!r} to be started (used with --dry-run)'.format(
            obj_kind.title(), obj_name))
        return True
    log_info('{0}s to be started: {1!r}'.format(obj_kind.title(), obj_list))
    # Start action: operate on a copy of the state, then swap it in
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.workload_start(temp_state.get_current_fabric(), obj_list)
    # Set temp state to current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if not res[0]:
        sys.exit(res[1])
    return True