def init_attr_fabric_manager(self) -> bool:
    """Init configuration attributes for fabric manager.

    Ensures the 'fabric_manager' attribute exists and carries 'id', 'ip'
    and 'company_name' keys. The public IP address is resolved once via
    api.ipify.org when 'id'/'ip' are missing.

    :return: True on success, False when the public IP cannot be fetched
    """
    # Set default value
    fabric_manager: Dict = {}
    if not bool(self.get_attr('fabric_manager')):
        self.set_attr('fabric_manager', fabric_manager, override=False)
    fabric_manager = self.get_attr('fabric_manager')
    if 'id' not in fabric_manager or 'ip' not in fabric_manager:
        hostname: str = socket.gethostname()
        username: str = getpass.getuser()
        try:
            # Timeout added so a hung network call cannot block start-up
            # forever; requests.Timeout is handled below.
            ip: str = requests.get('https://api.ipify.org', timeout=10).text
        except (requests.ConnectionError, requests.HTTPError,
                requests.URLRequired, requests.Timeout,
                requests.TooManyRedirects) as l_err:
            # FIX: requests.ConnectionError was listed twice in this tuple.
            log_error(
                "Unexpected error: Not able to get fabric manager's IP address ({0!s})"
                .format(l_err))
            return False
        fabric_manager['id'] = hostname + '_' + ip + '_' + username
        fabric_manager['ip'] = ip
    if 'company_name' not in fabric_manager:
        fabric_manager['company_name'] = ''
    self.set_attr('fabric_manager', fabric_manager, override=True)
    return True
def leave_fabric(ctx):
    """Leave current fabric"""
    current = ctx.obj.state.get_current_fabric()
    if not current:
        log_error('Cannot leave. Please select fabric first.')
        sys.exit(1)
    log_info('Leaving fabric {0!r}'.format(current))
    return ctx.obj.set_current_fabric(None)
def run_playbook(self, playbook_file_name, tag_list=None, local=None):
    """Run ansible playbook.

    :param playbook_file_name: playbook file to execute
    :param tag_list: optional list of ansible tags to restrict the run
    :param local: if not None, run against localhost instead of the
        dynamic inventory script
    :return: (True, 0) on success, (False, exit_code) on failure
    """
    if not tag_list:
        tag_list = []
    log_info(
        "Running ansible playbook {0!r}...".format(playbook_file_name))
    # Prepare env
    cwd = os.getcwd()
    os.chdir(self.ansible_dir)
    os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
    os.environ['ANSIBLE_RETRY_FILES_ENABLED'] = 'False'
    os.environ['ANSIBLE_SSH_RETRIES'] = '5'
    ansible_parameters = [
        "-i", "localhost,", "-c", "local", "-u", os.getenv('USER', ''),
        "--extra-vars", "env_fabric_name=" + self.fabric,
        "--extra-vars",
        "env_customer_company_name=" + self.customer_company_name,
        # FIX: the '=' was missing, producing the malformed extra-var
        # "env_hosted_zone<value>" instead of "env_hosted_zone=<value>".
        "--extra-vars", "env_hosted_zone=" + self.hosted_zone,
        playbook_file_name
    ]
    # Check if not local execution
    if local is None:
        ansible_parameters = [
            "-i", "ansible_inventory.sh", playbook_file_name
        ]
    # Add tags if provided (ansible accepts the trailing comma, which the
    # original loop also produced)
    if tag_list:
        ansible_parameters.append("--tags")
        ansible_parameters.append(",".join(tag_list) + ",")
    # Run ansible playbook
    try:
        cmd = sh.ansible_playbook(ansible_parameters,
                                  _out=self.ansible_process_output,
                                  _bg=True)
        cmd.wait()
    except sh.ErrorReturnCode as err:
        log_info(err.full_cmd)
        log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
        log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
        log_error(
            "Unexpected ansible playbook error (status code {0!r})".format(
                err.exit_code))
        # Restore the previous working directory if it still exists
        if os.path.exists(cwd):
            os.chdir(cwd)
        return False, err.exit_code
    if os.path.exists(cwd):
        os.chdir(cwd)
    return True, 0
def plan_dump(attr, filename):
    """Dump terraform template to a file.

    :param attr: template text to write
    :param filename: destination file path
    :return: True on success, False on any write error
    """
    # noinspection PyBroadException
    try:
        # `with` closes the file on exit; the explicit close() the original
        # carried was redundant and has been dropped.
        with open(filename, 'w') as dump_f:
            dump_f.write(attr)
    except IOError as dump_err:
        log_error("{0} - I/O error({1}): {2}".format(filename,
                                                     dump_err.errno,
                                                     dump_err.strerror))
        return False
    except Exception:
        # handle other exceptions such as attribute errors
        log_error("{0} - Unexpected error: {1}".format(filename,
                                                       sys.exc_info()[0]))
        return False
    return True
def inventory_dump(attr, filename):
    """Dump ansible inventory to a file as JSON.

    :param attr: JSON-serialisable inventory structure
    :param filename: destination file path
    :return: True on success, False on any write error
    """
    # noinspection PyBroadException
    try:
        # `with` closes the file on exit; the explicit close() the original
        # carried was redundant and has been dropped.
        with open(filename, 'w') as dump_f:
            json.dump(attr, dump_f)
    except IOError as dump_err:
        log_error("{0} - I/O error({1}): {2}".format(
            filename, dump_err.errno, dump_err.strerror))
        return False
    except Exception:
        # handle other exceptions such as attribute errors
        log_error("{0} - Unexpected error: {1}".format(
            filename, sys.exc_info()[0]))
        return False
    return True
def get_output_variable_with_retries(self, module, variable, retries):
    """Get output variable from terraform plan (with retries).

    Between attempts the targeted module is refreshed with
    ``terraform refresh`` so that a lagging state file gets a chance to
    catch up.

    :param module: terraform module name the output belongs to
    :param variable: output variable name
    :param retries: maximum number of refresh-and-retry cycles
    :return: the output value, or None if it never became available
    """
    terraform_refresh = [
        "refresh", "-input=false",
        "-var", "gcp_credentials=" + self.gcp_credentials,
        "-var", "gcp_project_name=" + self.gcp_project_name,
        "-var", "azr_client_id=" + self.azr_client_id,
        "-var", "azr_client_secret=" + self.azr_client_secret,
        "-var", "azr_resource_group_name=" + self.azr_resource_group_name,
        "-var", "azr_subscription_id=" + self.azr_subscription_id,
        "-var", "azr_tennant_id=" + self.azr_tennant_id,
        "-var", "image_tag=" + self.image_tag,
        "-var", "image_version=" + self.image_family.replace(".", "-"),
        "-var", "dns_managed_zone_domain=" + self.hosted_zone,
        "-target=module." + module + ".azurerm_virtual_machine.vm"
    ]
    # Check if there AWS keys or EC2 role should be used
    if not self.aws_ec2_role:
        terraform_refresh.append("-var")
        terraform_refresh.append("aws_secret_key={}".format(self.s3_secret_key))
        terraform_refresh.append("-var")
        terraform_refresh.append("aws_access_key={}".format(self.s3_access_key_id))
    log_info("Getting {0!r} output for {1!r} from terraform state...".format(variable, module))
    var = self.get_output_variable(module, variable)
    i = 1
    cwd = os.getcwd()
    os.chdir(self.terraform_dir)
    while var is None and i <= retries:
        # FIX: log message typo "terraforn" -> "terraform"
        log_warn('Retry to get {0!r} output from terraform state. ({1} of {2})'.format(variable, i, retries))
        log_info('Refresh terraform module...')
        try:
            # In debug mode stream terraform output through the handler,
            # otherwise run it silently.
            if not self.config.get_debug():
                sh.terraform(terraform_refresh)
            else:
                cmd = sh.terraform(terraform_refresh,
                                   _out=self.terraform_process_output,
                                   _bg=True)
                cmd.wait()
        except sh.ErrorReturnCode as err:
            log_info(err.full_cmd)
            log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
            log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
            log_error("Unexpected terraform error during refresh (status code {0!r})".format(err.exit_code))
        var = self.get_output_variable(module, variable)
        i += 1
    os.chdir(cwd)
    return var
def export_fabric(ctx, filename, output_format):
    """Export fabric specification to file"""
    state = ctx.obj.state
    fabric_name = state.get_current_fabric()
    # A fabric must be selected before anything can be exported
    if not fabric_name:
        log_error("Please set fabric before exporting")
        sys.exit(1)
    log_info('Exporting to {0!r}'.format(filename))
    spec = ExportSpec(fabric=state.get_fabric(fabric_name),
                      fabric_name=fabric_name,
                      export_format=output_format,
                      out_file=filename,
                      api_version=state.api_version)
    if not spec.generate_spec():
        log_error('Error exporting fabric configuration')
        sys.exit(1)
    log_ok('Fabric configuration exported successfully')
    return True
def get_output_variable(self, module, variable):
    """Get output variable from terraform plan"""
    prev_dir = os.getcwd()
    os.chdir(self.terraform_dir)
    try:
        raw = sh.terraform("output", "-module=" + module, variable)
    except sh.ErrorReturnCode as err:
        # Surface the failed command and its streams, then bail out
        log_info(err.full_cmd)
        log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
        log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
        log_error("Unexpected terraform error during output (status code {0!r})".format(err.exit_code))
        os.chdir(prev_dir)
        return None
    os.chdir(prev_dir)
    # An empty output is reported as None
    value = raw.strip()
    return value if value else None
def set_fabric(ctx, fabric_name):
    """Set current fabric"""
    state = ctx.obj.state
    # Check fabric exists
    if not state.check_fabric(fabric_name):
        log_error("Fabric {!r} doesn't exist".format(fabric_name))
        sys.exit(1)
    fabric = state.get_fabric(fabric_name)
    # A fabric marked for deletion may still be selected, with a warning
    if state.check_object_state(fabric, ObjectState.DELETING):
        log_warn("Proceeding, but object is set for deletion!")
        ctx.obj.set_current_fabric(fabric_name)
        log_ok('Active fabric: {0!r}'.format(fabric_name))
        return True
    # Otherwise it must be configured and in SUCCESS status
    # (short-circuit preserved: status is only checked when configured)
    if not (state.check_object_state(fabric, ObjectState.CONFIGURED) and
            state.check_object_status(fabric, ObjectStatus.SUCCESS)):
        log_error("Cannot activate, configure fabric {0!r} first".format(fabric_name))
        sys.exit(1)
    ctx.obj.set_current_fabric(fabric_name)
    log_ok('Active fabric: {0!r}'.format(fabric_name))
    return True
def show_fabric(ctx, list_all, name):
    """Show fabric information.

    With ``list_all`` set, prints the list of fabrics (current one starred)
    and returns. Otherwise shows the state of the explicitly named fabric,
    or of the currently selected one.
    """
    fabric_key_order = [
        'state', 'status', 'config', 'vpc', 'orchestrator', 'processor',
        'workload'
    ]
    current_fabric = ctx.obj.state.get_current_fabric()
    # If list_all - just list and end
    if list_all:
        click.echo('{:1s} {:15s}'.format(' ', 'FABRIC'))
        for fabric_key in ctx.obj.state.get().get('fabric'):
            click.echo('{0:^1s} {1:15s}'.format(
                '*' if fabric_key == current_fabric else ' ', fabric_key))
        return True
    # Check if fabric is set
    if name is not None:
        # Check existence
        if not ctx.obj.state.check_fabric(name):
            log_error("Cannot show. Fabric {0!r} doesn't exist".format(name))
            sys.exit(1)
        fabric_name = name
    else:
        if not current_fabric:
            log_info(
                'Available fabrics listed. Use “bwctl set fabric FABRIC_NAME” to select fabric.'
            )
            click.echo('{:1s} {:15s}'.format(' ', 'FABRIC'))
            for fabric_key in ctx.obj.state.get().get('fabric'):
                click.echo('{0:^1s} {1:15s}'.format(
                    '*' if fabric_key == current_fabric else ' ', fabric_key))
            sys.exit(1)
        fabric_name = current_fabric
    # List dict in defined order
    fabric_state = OrderedDict()
    for key in fabric_key_order:
        if key in ctx.obj.state.get_fabric(fabric_name):
            fabric_state[key] = ctx.obj.state.get_fabric(fabric_name)[key]
    # FIX: the header previously printed current_fabric, which is wrong
    # (or None) when an explicit NAME argument was given.
    click.echo('{0!s}:'.format(fabric_name))
    click.echo(json.dumps(fabric_state, indent=2))
    return True
def config_orchestrator(ctx, orchestrator_name, all_nodes, dry_run, file):
    """Configure orchestrator.

    Either a single ORCHESTRATOR-NAME or --all must be given (not both).
    Works on a deep copy of the state and only commits (dumps) it after
    the configure action succeeds.
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot configure {0}. Please select fabric first.'.format(
            obj_kind))
        sys.exit(1)
    # Generate temporary state
    temp_state = deepcopy(ctx.obj.state)
    # Check target objects to be used
    obj_list = []
    if all_nodes:
        # --all and an explicit name are mutually exclusive
        if orchestrator_name:
            log_error(
                "Cannot configure. Either {}-NAME or option --all should be used"
                .format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
        ]
        # Nodes whose creation failed are skipped below
        obj_created_failed = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
            and temp_state.check_object_status(x[1], ObjectStatus.FAILED)
        ]
        # Nodes pending deletion are skipped below
        obj_deleting = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.DELETING)
        ]
        obj_list = [
            x for x in obj_all
            if x not in obj_deleting and x not in obj_created_failed
        ]
        if not obj_list:
            states = [
                ObjectState.CREATED.value, ObjectState.CONFIGURED.value,
                ObjectState.UPDATED.value
            ]
            log_error(
                "Cannot configure. There are no {}s in states: {!r}".format(
                    obj_kind, states))
            sys.exit(1)
        if obj_created_failed:
            log_warn('There are failed {}s during creation: {!r}. Skipping...'.
                     format(obj_kind.lower(), obj_created_failed))
        if obj_deleting:
            log_warn(
                'There are {}s in deleting state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_deleting))
    elif orchestrator_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind,
                                                       orchestrator_name)
        # Check if exist
        if not temp_state.check_orchestrator(temp_state.get_current_fabric(),
                                             obj_name):
            log_error("Cannot configure. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = temp_state.get_fabric_object(obj_kind, orchestrator_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.FAILED):
            log_error(
                "{0} was created with failures. Run create {1} again before configure"
                .format(obj_kind.title(), obj_kind))
            sys.exit(1)
        obj_list = [orchestrator_name]
    elif orchestrator_name is None:
        log_error(
            "Cannot configure. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Configure dry-run
    if dry_run:
        log_warn('{0}s {1!r} to be configured (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    # Check if controller and telemetry are not configured yet; a password
    # is generated only for nodes freshly created with SUCCESS status.
    passwd_controller = None
    passwd_grafana = None
    for orch in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, orch)
        if obj['type'] == 'controller' and temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.SUCCESS):
            passwd_controller = temp_state.get_passwd()
        if obj['type'] == 'telemetry' and temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.SUCCESS):
            passwd_grafana = temp_state.get_passwd()
    # Check dockerhub credentials (only needed when a controller is among
    # the targets)
    credentials = None
    for orch in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, orch)
        if obj['type'] == 'controller':
            credentials = Credentials(temp_state.get_current_fabric(),
                                      temp_state.get(), ctx.obj.state.config)
            if not credentials.get_docker():
                log_error(
                    'Cannot configure. Not able to get Bayware dockerhub credentials'
                )
                sys.exit(1)
    # Configure orchestrator
    res = temp_state.orchestrator_configure(
        temp_state.get_current_fabric(),
        obj_list,
        controller_passwd=passwd_controller,
        grafana_passwd=passwd_grafana,
        credentials=credentials)
    if not res[0]:
        sys.exit(res[1])
    # Show grafana password
    if passwd_grafana is not None:
        temp_state.show_passwd(passwd_grafana, 'grafana')
    # Show controller password
    if passwd_controller is not None:
        temp_state.show_passwd(passwd_controller, 'controller')
    # Dump state
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if file:
        click.echo('File: {0}'.format(file))
    return True
# Register top-level sub-commands on the root `bwctl` click group.
bwctl.add_command(init_cmd)
bwctl.add_command(leave_cmd)
bwctl.add_command(restart_cmd)
bwctl.add_command(set_cmd)
bwctl.add_command(show_cmd)
bwctl.add_command(start_cmd)
bwctl.add_command(stop_cmd)
bwctl.add_command(update_cmd)

# Invoke the CLI group with a shared Session object and click settings.
# NOTE(review): this call presumably lives inside main() in the original
# layout, with the PidFile block below as the module entry point — confirm
# against the full file.
# pylint: disable=no-value-for-parameter,unexpected-keyword-arg
bwctl(obj=Session(),
      help_option_names=["-h", "--help"],
      max_content_width=120,
      auto_envvar_prefix="BW")

# Handle bwctl PID locking: only one bwctl instance may run per user.
PID_NAME = 'bwctl'
PID_DIR = os.path.expanduser("~/.bwctl")
try:
    with PidFile(PID_NAME, piddir=PID_DIR) as p:
        main()
except PidFileAlreadyLockedError:
    # Another bwctl process holds the lock
    log_error('Lock detected, bwctl is already running. Exiting...')
    sys.exit(1)
except IOError:
    # Lock directory/file not writable
    log_error(
        'Unable to create lockfile {!r}, please check permissions. Exiting...'.
        format(os.path.join(PID_DIR, PID_NAME)))
    sys.exit(1)
def start_workload(ctx, workload_name, dry_run, all_nodes):
    """Start workload.

    Either a single WORKLOAD-NAME or --all must be given (not both).
    Only workloads past the CREATED state (configured/stopped) are started.
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_list, obj_name = None, None
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error(
            'Cannot start {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check target objects to be used
    if all_nodes:
        if workload_name:
            log_error(
                "Cannot start. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        # Everything that is neither pending deletion nor still only created
        obj_list = [
            x[0] for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
            if not ctx.obj.state.check_object_state(x[1], ObjectState.DELETING)
            and not ctx.obj.state.check_object_state(x[1], ObjectState.CREATED)
        ]
        if not obj_list:
            log_error(
                "Cannot start. There are no configured {}s".format(obj_kind))
            sys.exit(1)
    elif workload_name:
        # Check if naming matches rules
        obj_name = ctx.obj.state.normalise_state_obj_name(
            obj_kind, workload_name)
        # Check if exist
        if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(),
                                            obj_name):
            log_error("Cannot start. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = ctx.obj.state.get_fabric_object(obj_kind, obj_name)
        if ctx.obj.state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if ctx.obj.state.check_object_state(obj, ObjectState.STARTED) and \
                ctx.obj.state.check_object_status(obj, ObjectStatus.SUCCESS):
            log_warn("{0} {1!r} is already started".format(
                obj_kind.title(), obj_name))
            return True
        if not ctx.obj.state.check_object_state(
                obj, [ObjectState.CONFIGURED, ObjectState.STOPPED]):
            log_error("Cannot start. {0} {1!r} should be configured".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        obj_list = [workload_name]
    elif workload_name is None:
        log_error(
            "Cannot start. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Start dry-run
    if dry_run:
        # FIX: previously formatted obj_name, which is None under --all;
        # report the resolved target list like the sibling commands do.
        log_warn('{0}s {1!r} to be started (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    log_info('{0}s to be started: {1!r}'.format(obj_kind.title(), obj_list))
    # Start action
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.workload_start(temp_state.get_current_fabric(), obj_list)
    # Set temp state to current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if not res[0]:
        sys.exit(res[1])
    return True
def __init__(self):
    """Initialise all attributes.

    Reads the version files bundled next to this module, initialises the
    persisted State, prepares the terraform working directory (symlinking
    the bundled modules/resources into it) and sets up REPL prompt options
    and the table of supported cloud regions.
    """
    # Initialise bwctl version
    self.version_file: str = '../version.txt'
    # noinspection PyBroadException
    try:
        with open(os.path.join(os.path.dirname(__file__),
                               self.version_file)) as f:
            self.version = f.read().strip()
            f.close()  # NOTE(review): redundant inside `with`; harmless
    except IOError as err:
        # NOTE(review): on failure self.version stays unset, and the
        # State(...) call below would raise AttributeError — confirm this
        # path is acceptable.
        log_error("I/O error while reading {0!r} ({1!s}): {2!s}".format(
            self.version_file, err.errno, err.strerror))
    except Exception:
        log_error("Unexpected error while reading {0!r}: {1!s}".format(
            self.version_file, sys.exc_info()[0]))
    # Initialise Bayware family version
    self.version_family_file: str = '../version_family.txt'
    # noinspection PyBroadException
    try:
        with open(os.path.join(os.path.dirname(__file__),
                               self.version_family_file)) as f:
            self.version_family = f.read().strip()
            f.close()  # NOTE(review): redundant inside `with`; harmless
    except IOError as err:
        log_error("I/O error while reading {0!r} ({1!s}): {2!s}".format(
            self.version_family_file, err.errno, err.strerror))
    except Exception:
        # handle other exceptions such as attribute errors
        log_error("Unexpected error while reading {0!r}: {1!s}".format(
            self.version_family_file, sys.exc_info()[0]))
    # Init state
    self.state = State(self.version, self.version_family)
    self.state.init()
    # File names
    self.history_file: str = os.path.join(self.state.config.dir, 'history')
    self.credentials_template_file: str = os.path.join(
        self.state.config.dir, 'credentials.yml')
    self.terraform_dir: str = os.path.join(self.state.config.dir, 'terraform')
    # Ensure terraform plan structures ready
    try:
        os.makedirs(self.terraform_dir)
    except FileExistsError:
        # directory already exists
        pass
    # Link bundled terraform modules/resources into the working directory
    for entity in ['modules', 'resources']:
        src: str = os.path.join(
            os.path.dirname(bwctl_resources.terraform.__file__), entity)
        dst: str = os.path.join(self.terraform_dir, entity)
        try:
            os.symlink(src, dst)
        except FileExistsError:
            # link already exists
            pass
    # Initialise repl
    self.default_prompt_msg: str = u"bwctl> "
    self.prompt_kwargs: Dict = {
        "history": FileHistory(self.history_file),
        "message": self.default_prompt_msg,
        "complete_while_typing": True,
        "enable_history_search": True
    }
    # Initialise supported regions per cloud provider
    self.cloud_regions: Dict = {
        'aws': [
            'ap-east-1', 'ap-northeast-1', 'ap-northeast-2', 'ap-south-1',
            'ap-southeast-1', 'ap-southeast-2', 'ca-central-1',
            'eu-central-1', 'eu-north-1', 'eu-west-1', 'eu-west-2',
            'eu-west-3', 'sa-east-1', 'us-east-1', 'us-east-2', 'us-west-1',
            'us-west-2'
        ],
        'azr': [
            'centralus', 'eastus', 'eastus2', 'japaneast', 'southcentralus',
            'westeurope', 'westus', 'westus2'
        ],
        'gcp': [
            'asia-east1', 'asia-east2', 'asia-northeast1', 'asia-northeast2',
            'asia-south1', 'asia-southeast1', 'australia-southeast1',
            'europe-north1', 'europe-west1', 'europe-west2', 'europe-west3',
            'europe-west4', 'europe-west6', 'northamerica-northeast1',
            'southamerica-east1', 'us-central1', 'us-east1', 'us-east4',
            'us-west1', 'us-west2'
        ]
    }
def config_fabric(ctx, fabric_name, credentials_file, ssh_private_key):
    """Configure fabric.

    Validates the company name, optionally updates the credentials file
    and/or SSH private key, then runs the configure action on a deep copy
    of the state and commits it on success.
    """
    # Check if naming matches rules
    fabric_name = ctx.obj.state.normalise_state_obj_name('fabric', fabric_name)
    company_name = ctx.obj.state.normalise_state_obj_name(
        'company-name',
        ctx.obj.state.config.get_attr('fabric_manager')['company_name'])
    if bool(company_name):
        # Company name may only contain alphanumerics and dashes
        pattern_string = "^[a-zA-Z0-9-]+$"
        pattern = re.compile(pattern_string)
        if not pattern.match(company_name):
            log_error(
                "Cannot configure. Company name {0!r} doesn't match pattern {1!r}"
                .format(company_name, pattern_string))
            sys.exit(1)
    else:
        log_error(
            "Cannot configure fabric. Company name is required. Please run 'bwctl init' command or set "
            "'fabric_manager.company_name' in {!r}".format(
                os.path.join(ctx.obj.state.config.dir,
                             ctx.obj.state.config.file)))
        sys.exit(1)
    # Check if exist
    if not ctx.obj.state.check_fabric(fabric_name):
        log_error(
            "Cannot configure. Fabric {0!r} doesn't exist".format(fabric_name))
        sys.exit(1)
    temp_state = deepcopy(ctx.obj.state)
    skip_ssh_keygen = False
    fabric = temp_state.get_fabric(fabric_name)
    # Set credentials
    cred_changed = False
    ssh_key_changed = False
    # Update credentials file if changed
    if bool(fabric['config']
            ['credentialsFile']) and credentials_file is not None:
        if fabric['config']['credentialsFile'] != credentials_file:
            temp_state.credentials_set(fabric_name, credentials_file)
            log_info(
                "Credentials file is set to {!r}".format(credentials_file))
            cred_changed = True
    # Check if ssh key file should be taken from config
    ssh_keys_cfg = ctx.obj.state.config.get_attr('ssh_keys')
    if bool(ssh_keys_cfg['private_key']) and ssh_private_key is None:
        ssh_private_key = ssh_keys_cfg['private_key']
    # Check if ssh key file is provided
    if ssh_private_key is not None:
        skip_ssh_keygen = True
        ssh_key_changed = True
        # An unchanged key is not treated as a change
        if 'privateKey' in fabric['config']['sshKeys']:
            if fabric['config']['sshKeys']['privateKey'] == ssh_private_key:
                ssh_key_changed = False
        temp_state.set_ssh_key(ssh_private_key, fabric_name)
        log_info("SSH private key is set to {!r}".format(ssh_private_key))
        credentials = Credentials(fabric_name, temp_state.get(),
                                  ctx.obj.state.config)
        if not credentials.check_ssh(ssh_private_key):
            log_error("Cannot configure fabric {0!r}. SSH keys issue".format(
                fabric_name))
            sys.exit(1)
    # Check fabric state and status
    if temp_state.check_object_state(
            fabric, ObjectState.DELETING) and not cred_changed:
        log_error("Cannot proceed, object is set for deletion!")
        sys.exit(1)
    elif temp_state.check_object_state(fabric, ObjectState.CONFIGURED) and \
            temp_state.check_object_status(fabric, ObjectStatus.SUCCESS):
        # Already configured: only a credentials change is allowed
        if ssh_key_changed:
            log_error(
                "There is no possibility to change SSH key. Fabric {0!r} is already configured"
                .format(fabric_name))
            sys.exit(1)
        if not cred_changed:
            log_error(
                "Cannot configure. Fabric {0!r} is already configured".format(
                    fabric_name))
            sys.exit(1)
    else:
        if temp_state.check_object_state(fabric, ObjectState.CREATED):
            # Set company name
            fabric['config']['companyName'] = company_name
            # Configure fabric
            res = temp_state.fabric_configure(fabric_name, skip_ssh_keygen)
            if not res[0]:
                sys.exit(res[1])
            if not temp_state.check_object_status(fabric, ObjectStatus.FAILED):
                log_ok(
                    "Fabric {0!r} configured successfully".format(fabric_name))
    # Commit the modified state
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
def config_workload(ctx, workload_name, all_nodes, orchestrator_fqdn, location,
                    dry_run, file):
    """Configure workload.

    Either a single WORKLOAD-NAME or --all must be given (not both).
    Freshly created workloads additionally require --orchestrator-fqdn.
    """
    obj_kind = ObjectKind.WORKLOAD.value
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot configure {0}. Please select fabric first.'.format(
            obj_kind))
        sys.exit(1)
    # Generate temporary state
    temp_state = deepcopy(ctx.obj.state)
    # Check target objects to be used
    obj_list = []
    if all_nodes:
        if workload_name:
            log_error(
                "Cannot configure. Either {}-NAME or option --all should be used"
                .format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
        ]
        # Successfully created workloads need an orchestrator FQDN
        obj_created = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
            and temp_state.check_object_status(x[1], ObjectStatus.SUCCESS)
        ]
        obj_created_failed = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
            and temp_state.check_object_status(x[1], ObjectStatus.FAILED)
        ]
        obj_deleting = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.DELETING)
        ]
        obj_list = [
            x for x in obj_all
            if x not in obj_deleting and x not in obj_created_failed
        ]
        if not obj_list:
            states = [
                ObjectState.CREATED.value, ObjectState.CONFIGURED.value,
                ObjectState.UPDATED.value, ObjectState.STARTED.value,
                ObjectState.STOPPED.value
            ]
            log_error(
                "Cannot configure. There are no {}s in states: {!r}".format(
                    obj_kind, states))
            sys.exit(1)
        if obj_created and orchestrator_fqdn is None:
            log_error(
                'Cannot configure {}. Option "--orchestrator-fqdn" is required by {!r}'
                .format(obj_kind.lower(), obj_created))
            sys.exit(1)
        if obj_created_failed:
            log_warn('There are failed {}s during creation: {!r}. Skipping...'.
                     format(obj_kind.lower(), obj_created_failed))
        if obj_deleting:
            log_warn(
                'There are {}s in deleting state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_deleting))
    elif workload_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind, workload_name)
        # Check if exist
        if not temp_state.check_workload(temp_state.get_current_fabric(),
                                         obj_name):
            log_error("Cannot configure. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = temp_state.get_fabric_object(obj_kind, workload_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            if orchestrator_fqdn is None:
                log_error(
                    'Cannot configure. Missing option "--orchestrator-fqdn"')
                sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.FAILED):
            log_error(
                "{0} was created with failures. Run create {1} again before configure"
                .format(obj_kind.title(), obj_kind))
            sys.exit(1)
        obj_list = [workload_name]
    elif workload_name is None:
        log_error(
            "Cannot configure. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Set configuration on each target object
    for obj_name in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if orchestrator_fqdn is not None:
            obj['config']['orchestrator'] = orchestrator_fqdn
        if location is not None:
            obj['config']['location'] = location
    # NOTE(review): placement reconstructed — this warning formats the whole
    # obj_list, so it appears intended to run once, not per workload.
    if orchestrator_fqdn is None:
        log_warn('{0}s {1!r} to be re-configured'.format(
            obj_kind.title(), obj_list))
    # Configure dry-run
    if dry_run:
        log_warn('{0}s {1!r} to be configured (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    # Split targets into config/stop/start actions and run each group;
    # state is dumped even on failure so progress is not lost.
    actions_list = temp_state.configure_actions_list(obj_kind, obj_list)
    if actions_list['config']:
        res = temp_state.workload_configure(temp_state.get_current_fabric(),
                                            actions_list['config'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    if actions_list['stop']:
        res = ctx.obj.state.workload_stop(temp_state.get_current_fabric(),
                                          actions_list['stop'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    if actions_list['start']:
        res = ctx.obj.state.workload_start(temp_state.get_current_fabric(),
                                           actions_list['start'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if file:
        click.echo('File: {0}'.format(file))
    return True
def delete_orchestrator(ctx, orchestrator_name, dry_run):
    """Delete orchestrator.

    For nodes past the CREATED state, first runs the type-specific ansible
    delete playbook (joining the swarm manager where one exists), then
    removes the object from the state and applies the new terraform plan.
    A swarm manager cannot be deleted while workers remain.
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    obj_name = orchestrator_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_orchestrator(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Check if configured (nodes still only CREATED skip the ansible step)
    obj = ctx.obj.state.get_fabric_object(obj_kind, obj_name)
    if not ctx.obj.state.check_object_state(obj, ObjectState.CREATED):
        # Delete orchestrator
        ansible_vars = []
        ansible_nodes = [obj_name]
        ansible_playbook = "delete-" + obj['type'] + ".yml"
        # Get swarm manager
        managers = [x for x in ctx.obj.state.get_fabric_objects(obj_kind).items() if 'manager' in x[1]['role']]
        workers = [x for x in ctx.obj.state.get_fabric_objects(obj_kind).items() if 'worker' in x[1]['role']]
        if not managers:
            log_warn('There is no swarm manager found. Nothing should be done with ansible')
        else:
            # Check if manager and there are workers
            if obj['role'] == 'manager' and workers:
                log_error('{0} {1!r} is swarm manager. Remove workers first:'.format(obj_kind.title(), orchestrator_name))
                for node in workers:
                    log_error("{0!r}".format(node[0]))
                sys.exit(1)
            # Run the playbook against the node plus the first manager
            ansible_vars = ansible_vars + [('env_swarm_manager_host', managers[0][0])]
            ansible_nodes = ansible_nodes + [managers[0][0]]
            ansible = Ansible(ctx.obj.state, ctx.obj.state.get_current_fabric(), ctx.obj.state.get_ssh_key())
            if ansible.inventory_generate(ansible_nodes, node_vars=ansible_vars):
                ansible_result = ansible.run_playbook(ansible_playbook)
                if not ansible_result[0]:
                    log_error('Cannot delete {0}. There is issue with ansible playbook execution'.format(obj_kind))
                    sys.exit(ansible_result[1])
            else:
                # Inventory generation failed
                sys.exit(1)
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(), ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion happens on a deep copy of the state
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state, credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def update_workload(ctx, workload_name, dry_run, all_nodes):
    """Update one workload, or all eligible workloads.

    Eligible workloads are those neither in CREATED (not configured yet) nor
    DELETING state. Actions (update/config/stop/start) are computed by the
    state object and applied in that order, persisting state after each phase.

    Fix: the single-workload path now puts the *normalised* name into
    ``obj_list`` (the raw ``workload_name`` was used before, while every check
    above had been performed on the normalised ``obj_name``).

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param workload_name: workload to update (mutually exclusive with all_nodes)
    :param dry_run: when truthy, only report what would be updated
    :param all_nodes: update every eligible workload
    :returns: True on success; exits the process on any failure
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_list, obj_name = None, None
    # Work on a scratch copy; the real state is replaced phase by phase below.
    temp_state = deepcopy(ctx.obj.state)
    # A fabric must be selected first.
    if not temp_state.get_current_fabric():
        log_error(
            'Cannot update {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Determine the target object list.
    if all_nodes:
        # --all and an explicit name are mutually exclusive.
        if workload_name:
            log_error(
                "Cannot update. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
        ]
        obj_created = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
        ]
        obj_deleting = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.DELETING)
        ]
        # Everything that is neither freshly created nor being deleted.
        obj_list = [
            x for x in obj_all
            if x not in obj_deleting and x not in obj_created
        ]
        if not obj_list:
            states = [
                ObjectState.CONFIGURED.value, ObjectState.UPDATED.value,
                ObjectState.STARTED.value, ObjectState.STOPPED.value
            ]
            log_error("Cannot update. There are no {}s in states: {!r}".format(
                obj_kind, states))
            sys.exit(1)
        if obj_created:
            log_warn(
                'There are {}s in created state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_created))
        if obj_deleting:
            log_warn(
                'There are {}s in deleting state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_deleting))
    elif workload_name:
        # Normalise the requested name according to the naming rules.
        obj_name = temp_state.normalise_state_obj_name(obj_kind, workload_name)
        # The workload must exist in the current fabric.
        if not temp_state.check_workload(temp_state.get_current_fabric(),
                                         obj_name):
            log_error("Cannot update. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Objects being deleted or not yet configured cannot be updated.
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            log_error("Cannot proceed, object is not configured")
            sys.exit(1)
        # Use the normalised name (was the raw workload_name before — bug).
        obj_list = [obj_name]
    elif workload_name is None:
        log_error(
            "Cannot update. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Update dry-run
    if dry_run:
        log_warn('{0}s to be updated: {1!r} (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    log_info('{0}s to be updated: {1!r}'.format(obj_kind.title(), obj_list))
    # Ask the state which action each target needs, then apply the phases in
    # order, persisting the (possibly partially updated) state after each.
    actions_list = temp_state.update_actions_list(obj_kind, obj_list)
    if actions_list['update']:
        ansible_playbook = "update-workload.yml"
        res = temp_state.obj_update(ansible_playbook, obj_kind,
                                    actions_list['update'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['config']:
        res = temp_state.workload_configure(temp_state.get_current_fabric(),
                                            actions_list['config'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['stop']:
        res = temp_state.workload_stop(temp_state.get_current_fabric(),
                                       actions_list['stop'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['start']:
        res = temp_state.workload_start(temp_state.get_current_fabric(),
                                        actions_list['start'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
    return True
def show_vpc(ctx, name, cloud, full, regions):
    """Show VPC information"""
    obj_kind = ObjectKind.VPC.value
    # Region-listing mode short-circuits everything else.
    if regions:
        if cloud != 'all':
            for region in ctx.obj.cloud_regions[cloud]:
                click.echo('{0!s}'.format(region))
        else:
            for provider in ctx.obj.cloud_regions:
                click.echo('{0!s}:'.format(provider))
                for region in ctx.obj.cloud_regions[provider]:
                    click.echo(' {0!s}'.format(region))
        return True
    # A fabric must be selected before VPCs can be inspected.
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot show VPC contents. Please select fabric first.')
        sys.exit(1)
    # Normalise the requested name according to the naming rules.
    if name is not None:
        name = ctx.obj.state.normalise_state_obj_name(obj_kind, name)
    vpc_obj = []
    if name is not None:
        # A named VPC is always shown in full.
        full = True
        if not ctx.obj.state.check_vpc(ctx.obj.state.get_current_fabric(), name):
            log_error("Cannot show. VPC {0!r} doesn't exist".format(name))
            sys.exit(1)
        if cloud == 'all':
            # Substring match on the VPC name only.
            vpc_obj = [item
                       for item in ctx.obj.state.get_fabric_objects(obj_kind).items()
                       if name in item[0]]
        else:
            # Match both the cloud filter and the name.
            vpc_obj = [item
                       for item in ctx.obj.state.get_fabric_objects(obj_kind).items()
                       if cloud in item[1]['cloud'] and name in item[0]]
            if not vpc_obj:
                log_error("Cannot show. VPC {0!r} isn't in {1!r} cloud".format(
                    name, cloud))
                sys.exit(1)
    else:
        if cloud != 'all':
            vpc_obj = [item
                       for item in ctx.obj.state.get_fabric_objects(obj_kind).items()
                       if cloud in item[1]['cloud']]
            if not vpc_obj:
                log_error("Cannot show. No VPC in {0!r} cloud".format(cloud))
                sys.exit(1)
        else:
            vpc_obj = list(ctx.obj.state.get_fabric_objects(obj_kind).items())
    # Short listing: names only.
    if not full:
        click.secho("{0} list in {1!r} cloud".format(obj_kind.title(), cloud),
                    fg='green')
        for entry in vpc_obj:
            click.echo('{0:15s}'.format(entry[0]))
        return True
    # Full listing: JSON dump with an appropriate header.
    if name is not None:
        click.secho('{0} {1!s}:'.format(obj_kind.title(), name), fg='green')
    else:
        click.secho("{0} list in {1!r} cloud".format(obj_kind.title(), cloud),
                    fg='green')
    click.echo(json.dumps(dict(vpc_obj), indent=2))
    return True
def plan_execute(self):
    """Execute terraform plan: run ``terraform init`` then ``terraform apply``.

    Changes into ``self.terraform_dir`` for the run and restores the previous
    working directory afterwards (including on error).

    :returns: tuple ``(success: bool, exit_code: int)``
    """
    # Ordered terraform invocations; dict insertion order drives execution
    # order (init first, then apply).
    terraform_steps = {
        'init': [
            "init", "-input=false", "-force-copy"
        ],
        'apply': [
            "apply", "-input=false",
            "-var", "gcp_credentials=" + self.gcp_credentials,
            "-var", "gcp_project_name=" + self.gcp_project_name,
            "-var", "azr_client_id=" + self.azr_client_id,
            "-var", "azr_client_secret=" + self.azr_client_secret,
            "-var", "azr_resource_group_name=" + self.azr_resource_group_name,
            "-var", "azr_subscription_id=" + self.azr_subscription_id,
            # NOTE(review): 'azr_tennant_id' is misspelled ("tenant") but must
            # match the variable declared in the .tf files — confirm there
            # before renaming either side.
            "-var", "azr_tennant_id=" + self.azr_tennant_id,
            "-var", "production=" + self.production,
            "-var", "bastion_ip=" + self.config.get_attr('fabric_manager')['ip'],
            "-var", "image_tag=" + self.image_tag,
            # Image family dots are not valid in image version strings.
            "-var", "image_version=" + self.image_family.replace(".", "-"),
            "-var", "dns_managed_zone_domain=" + self.hosted_zone,
            "-auto-approve"
        ]
    }
    # Check if there AWS keys or EC2 role should be used: when no EC2 role is
    # available, pass explicit S3 backend and AWS provider keys.
    if not self.aws_ec2_role:
        terraform_steps['init'].append("-backend-config=secret_key={}".format(self.s3_secret_key))
        terraform_steps['init'].append("-backend-config=access_key={}".format(self.s3_access_key_id))
        terraform_steps['apply'].append("-var")
        terraform_steps['apply'].append("aws_secret_key={}".format(self.s3_secret_key))
        terraform_steps['apply'].append("-var")
        terraform_steps['apply'].append("aws_access_key={}".format(self.s3_access_key_id))
    # Remember where we were so we can chdir back when done.
    cwd = os.getcwd()
    log_info("Running terraform init and apply...")
    os.chdir(self.terraform_dir)
    # Refuse to run concurrently with another terraform process of this user.
    proc_name = 'terraform'
    proc_result = check_process_running(proc_name)
    if proc_result[0]:
        log_error("There is already {!r} process (PID {!r}) running for user {!r}. Please retry again "
                  "later...".format(proc_name, str(proc_result[1]), proc_result[2]))
        return False, 1

    def show_step(item):
        """Print current step"""
        # We need to return next step as progressbar prints previously completed step
        if item is not None:
            t_keys = list(terraform_steps.keys())
            idx = t_keys.index(item)
            if idx == len(t_keys) - 1:
                return '-> {0}'.format(item)
            else:
                return '-> {0}'.format(t_keys[idx + 1])

    if not self.config.get_debug():
        # Normal mode: quiet run behind a click progress bar.
        with click.progressbar(terraform_steps,
                               item_show_func=show_step,
                               show_eta=False) as bar:
            for step in bar:
                try:
                    sh.terraform(terraform_steps[step])
                except sh.ErrorReturnCode as err:
                    log_info(err.full_cmd)
                    log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
                    log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
                    log_error("Unexpected terraform error during {0!s} (status code {1!r})".format(step, err.exit_code))
                    # Restore working directory before bailing out.
                    os.chdir(cwd)
                    return False, err.exit_code
    else:
        # Debug mode: stream terraform output through the process-output
        # callback, running the command in the background and waiting on it.
        for step in terraform_steps:
            try:
                cmd = sh.terraform(terraform_steps[step],
                                   _out=self.terraform_process_output,
                                   _bg=True)
                cmd.wait()
            except sh.ErrorReturnCode as err:
                log_info(err.full_cmd)
                log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
                log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
                log_error("Unexpected terraform error during {0!s} (status code {1!r})".format(step, err.exit_code))
                os.chdir(cwd)
                return False, err.exit_code
    os.chdir(cwd)
    return True, 0
def show_workload(ctx, name, cloud, full):
    """Show workload information.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param name: optional workload name; when given, full details are shown
    :param cloud: cloud filter or 'all'
    :param full: show full JSON details instead of a name-only listing
    :returns: True on success; exits the process on errors
    """
    obj_kind = ObjectKind.WORKLOAD.value
    # A fabric must be selected before workloads can be inspected.
    if not ctx.obj.state.get_current_fabric():
        log_error(
            'Cannot show {0} contents. Please select fabric first.'.format(
                obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    if name is not None:
        name = ctx.obj.state.normalise_state_obj_name(obj_kind, name)
    # Initialise workload object
    workload_obj = []
    # If workload name set
    if name is not None:
        # If name set - always show full info
        full = True
        # Check workload existence
        if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(),
                                            name):
            log_error("Cannot show. {0} {1!r} doesn't exist".format(
                obj_kind.title(), name))
            sys.exit(1)
        # If no filter by cloud: substring match on the workload name only.
        if cloud == 'all':
            workload_obj = [
                x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                if name in x[0]
            ]
        # If filter by cloud: the workload's cloud is resolved through its
        # VPC object (workload -> 'vpc' field -> vpc object -> 'cloud').
        if cloud != 'all':
            workload_obj = [
                x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                if cloud in ctx.obj.state.get_fabric_object(
                    'vpc', ctx.obj.state.get_fabric_object(obj_kind, x[0])['vpc'])
                ['cloud'] and name in x[0]
            ]
            # Empty result set
            if not workload_obj:
                log_error("Cannot show. {0} {1!r} isn't in {2!r} cloud".format(
                    obj_kind.title(), name, cloud))
                sys.exit(1)
    # If name not set
    else:
        # If filter by cloud
        if cloud != 'all':
            workload_obj = [
                x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                if cloud in ctx.obj.state.get_fabric_object(
                    'vpc', ctx.obj.state.get_fabric_object(obj_kind, x[0])['vpc'])
                ['cloud']
            ]
            # Empty result set
            if not workload_obj:
                log_error("Cannot show. No {0} in {1!r} cloud".format(
                    obj_kind, cloud))
                sys.exit(1)
        # If no filter by cloud
        else:
            workload_obj = [
                x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
            ]
            # Empty result set
            if not workload_obj:
                log_error("Cannot show. No {0} in {1!r} cloud".format(
                    obj_kind, cloud))
                sys.exit(1)
    # Output: full JSON dump or name-only listing.
    if full:
        if name is not None:
            click.secho('{0} {1!s}:'.format(obj_kind.title(), name),
                        fg='green')
        else:
            click.secho("{0} list in {1!r} cloud".format(
                obj_kind.title(), cloud), fg='green')
        click.echo(json.dumps(dict(workload_obj), indent=2))
        return True
    else:
        click.secho("{0} list in {1!r} cloud".format(obj_kind.title(), cloud),
                    fg='green')
        for obj in workload_obj:
            click.echo('{0:15s}'.format(obj[0]))
        return True
def delete_batch(ctx, filename, input_format, dry_run, yes):
    """Delete a batch of objects described in a YAML batch file.

    Objects are grouped per fabric and deleted in dependency order:
    whole fabrics (with all their nested objects), then VPCs (with their
    nested nodes), then individual nodes (orchestrators/processors/workloads).
    On any terraform/ansible failure the "failed" state copy (objects marked
    DELETING/FAILED) is persisted and the process exits.

    Fixes applied:
    - ``ctx.obj.dump()`` on the fabric-ansible failure path → ``ctx.obj.state.dump()``
      (consistent with every other failure path; ``ctx.obj`` has no ``dump``).
    - the node-deletion error messages now use a node list computed up front;
      previously the ``plan_execute`` failure branch logged a stale boolean
      left over from the VPC section.
    - processor-constraint check uses ``get_fabric_objects(kind, fabric)``
      (plural, as everywhere else) instead of ``get_fabric_object``.
    - log message typo ``input=format`` → ``input-format``; dropped redundant
      ``close()`` inside the ``with`` block.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param filename: path to the batch file
    :param input_format: only 'yaml' is supported (others just warn)
    :param dry_run: accepted for interface compatibility (logged only)
    :param yes: skip the interactive confirmation
    :returns: True on success; exits the process on any failure
    """
    log_info('Deleting batch: file={!r}, input-format={!r}, dry-run={!r}'.format(
        filename, input_format, dry_run))
    # Only YAML currently
    if input_format != 'yaml':
        log_warn('Only YAML supported')
    # Safely load YAML
    # noinspection PyBroadException
    try:
        with open(filename, 'r') as config_f:
            try:
                batch = yaml.safe_load(config_f)
            except yaml.YAMLError as err:
                log_error("Error while loading YAML: {0!r}".format(err))
                if hasattr(err, 'problem_mark'):
                    mark = err.problem_mark
                    log_error("Error position: ({}:{})".format(
                        mark.line + 1, mark.column + 1))
                sys.exit(1)
    except IOError as err:
        log_error(err)
        sys.exit(1)
    except Exception:
        # handle other exceptions such as attribute errors
        print("Unexpected error:", sys.exc_info()[0])
        sys.exit(1)
    # Parse batch
    try:
        batch = BatchSpec(batch, ctx.obj.state.api_version)
    except TypeError as err:
        log_error("Cannot parse batch: {0!r}".format(err))
        sys.exit(1)
    # Get objects to be deleted (grouped by fabric).
    delete_target = {}
    for obj_kind in [ObjectKind.FABRIC, ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                     ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
        for obj in batch.get_attr_list(obj_kind):
            # A fabric entry names itself; everything else names its fabric.
            if obj_kind == ObjectKind.FABRIC:
                fabric_name = obj['metadata']['name']
            else:
                fabric_name = obj['metadata']['fabric']
            if fabric_name not in delete_target:
                delete_target[fabric_name] = {ObjectKind.FABRIC: [], ObjectKind.VPC: [],
                                              ObjectKind.WORKLOAD: [], ObjectKind.ORCHESTRATOR: [],
                                              ObjectKind.PROCESSOR: []}
            delete_target[fabric_name][obj_kind].append(batch.get_attr_name(obj))
    # Report what is about to be deleted.
    for fabric in delete_target:
        for obj_kind in [ObjectKind.FABRIC, ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                         ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
            if obj_kind in delete_target[fabric]:
                if delete_target[fabric][obj_kind]:
                    log_info('{0}: {1!r}'.format(obj_kind.value.title(),
                                                 delete_target[fabric][obj_kind]))

    def confirm_yes_no(message, default="no"):
        """Requires confirmation with yes/no"""
        valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
        prompt = " [y/N] "
        while True:
            log_info(message + prompt)
            choice = input().lower()
            if choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                log_info("Please respond with 'yes' or 'no'")

    if not yes:
        if not confirm_yes_no('Do you want to delete these objects?'):
            log_info('Exiting...')
            return True
    # For all fabrics in delete target
    for fabric in delete_target:
        # --- Processing whole-fabric deletions ---------------------------
        if fabric in delete_target[fabric][ObjectKind.FABRIC]:
            # Two state copies: _ok accumulates successful deletions, _failed
            # keeps everything but marked DELETING/FAILED, to persist on error.
            temp_state_ok = deepcopy(ctx.obj.state)
            temp_state_failed = deepcopy(ctx.obj.state)
            if temp_state_failed.check_fabric(fabric):
                log_warn('Fabric {0!r} is going to be deleted with all nested objects'.format(fabric))
                # Set deleting status in state
                temp_state_failed.set_object_state_status(temp_state_failed.get_fabric(fabric),
                                                          ObjectState.DELETING, ObjectStatus.FAILED)
                # Collect nested objects per kind (index order matters below).
                nested = []
                for obj_kind in [ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                                 ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
                    nested.append([obj[0] for obj in
                                   temp_state_failed.get_fabric_objects(obj_kind.value, fabric).items()])
                # Mark nested objects as deleting; drop non-VPC objects from
                # the "ok" state right away (VPCs go in a second pass).
                for index, obj_kind in enumerate([ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                                                  ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]):
                    for obj_name in nested[index]:
                        obj = temp_state_failed.get_fabric_object(obj_kind.value, obj_name, fabric)
                        temp_state_failed.set_object_state_status(obj, ObjectState.DELETING,
                                                                  ObjectStatus.FAILED)
                        if obj_kind != ObjectKind.VPC:
                            res = temp_state_ok.delete_fabric_object(obj_kind.value, obj_name, fabric)
                            if not res.status:
                                log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric, res.value))
                                sys.exit(1)
                # Try to delete nodes-only nested objects first.
                nested_nodes = nested[1] + nested[2] + nested[3]
                if nested_nodes:
                    log_info("Deleting nested nodes first")
                    # Get credentials
                    credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
                    if not credentials.get():
                        log_error('Batch failed. Not able to get credentials')
                        sys.exit(1)
                    # Run terraform
                    terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
                    if not terraform.plan_generate():
                        log_error('Batch failed to delete nested nodes: {!r}. Exiting'.format(nested_nodes))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(1)
                    terraform_result = terraform.plan_execute()
                    if not terraform_result[0]:
                        log_error('Batch failed to delete nested nodes: {!r}. Exiting'.format(nested_nodes))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(terraform_result[1])
                # If this is current fabric, unset it
                if temp_state_failed.get_current_fabric() == fabric:
                    ctx.obj.set_current_fabric(None)
                # Delete fabric from state
                res = temp_state_ok.delete_fabric(fabric)
                if not res.status:
                    log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric, res.value))
                    sys.exit(1)
                # Try to delete nested VPCs
                if nested[0]:
                    log_info("Deleting nested VPCs")
                    # Get credentials
                    credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
                    if not credentials.get():
                        log_error('Batch failed. Not able to get credentials')
                        sys.exit(1)
                    # Run terraform
                    terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
                    if not terraform.plan_generate():
                        log_error('Batch failed to delete fabric: {!r}. Exiting'.format(fabric))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(1)
                    terraform_result = terraform.plan_execute()
                    if not terraform_result[0]:
                        log_error('Batch failed to delete fabric: {!r}. Exiting'.format(fabric))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(terraform_result[1])
                # Run ansible playbook to remove the fabric's local files.
                ansible_playbook = "delete-fabric.yml"
                ansible = Ansible(temp_state_failed, fabric, temp_state_failed.get_ssh_key())
                log_info("Delete {0!r} fabric's files".format(fabric))
                ansible_result = ansible.run_playbook(ansible_playbook, local=True)
                if not ansible_result[0]:
                    log_error('Cannot delete fabric. There is issue with ansible playbook execution')
                    ctx.obj.state = deepcopy(temp_state_failed)
                    # Fix: was ctx.obj.dump(); state objects own dump().
                    ctx.obj.state.dump()
                    sys.exit(ansible_result[1])
                # Delete fabrics batches
                temp_state_ok.nodebatch_delete(fabric=fabric)
                # Success: promote the "ok" state and persist it.
                ctx.obj.state = deepcopy(temp_state_ok)
                ctx.obj.state.dump()
        # --- Processing VPC deletions ------------------------------------
        nested = {}
        temp_state_ok = deepcopy(ctx.obj.state)
        temp_state_failed = deepcopy(ctx.obj.state)
        # Iterate over a copy: entries may be removed while iterating.
        vpc_list = delete_target[fabric][ObjectKind.VPC][:]
        for vpc in vpc_list:
            # Skip VPCs whose fabric (or the VPC itself) no longer exists.
            if not temp_state_failed.check_fabric(fabric):
                delete_target[fabric][ObjectKind.VPC].remove(vpc)
                continue
            elif not temp_state_failed.check_vpc(fabric, vpc):
                delete_target[fabric][ObjectKind.VPC].remove(vpc)
                continue
            log_warn('VPC {0!r} is going to be deleted with all nested objects'.format(vpc))
            # Set deleting status in state
            obj = temp_state_failed.get_fabric_object(ObjectKind.VPC.value, vpc, fabric)
            temp_state_failed.set_object_state_status(obj, ObjectState.DELETING, ObjectStatus.FAILED)
            # Collect nodes nested in this VPC, per kind.
            nested[vpc] = []
            for obj_kind in [ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
                nested[vpc].append([obj[0] for obj in
                                    temp_state_failed.get_fabric_objects(obj_kind.value, fabric).items()
                                    if vpc in obj[1][ObjectKind.VPC.value]])
            # Mark nested nodes as deleting and drop them from the "ok" state.
            for index, obj_kind in enumerate([ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR,
                                              ObjectKind.WORKLOAD]):
                for obj_name in nested[vpc][index]:
                    obj = temp_state_failed.get_fabric_object(obj_kind.value, obj_name, fabric)
                    temp_state_failed.set_object_state_status(obj, ObjectState.DELETING,
                                                              ObjectStatus.FAILED)
                    res = temp_state_ok.delete_fabric_object(obj_kind.value, obj_name, fabric)
                    if not res.status:
                        log_error('Nested object {0!r} cannot be deleted. {1!s}'.format(obj_name, res.value))
                        sys.exit(1)
        # Try to delete nodes-only nested objects first.
        delete_nodes = False
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            if nested[vpc][0] or nested[vpc][1] or nested[vpc][2]:
                delete_nodes = True
        if delete_nodes:
            log_info("Deleting nested nodes first")
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete nodes, that nested to VPCs: {!r}. Exiting'.
                          format(delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete nodes, that nested to VPCs: {!r}. Exiting'.
                          format(delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Delete VPCs from the "ok" state.
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            res = temp_state_ok.delete_fabric_object(ObjectKind.VPC.value, vpc, fabric)
            if not res.status:
                log_error('VPC {0!r} cannot be deleted. {1!s}'.format(vpc, res.value))
                sys.exit(1)
        # Actual VPC delete
        if delete_target[fabric][ObjectKind.VPC]:
            log_info("Deleting VPCs")
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete VPCs: {!r}. Exiting'.format(
                    delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete VPCs: {!r}. Exiting'.format(
                    delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Delete VPCs batches
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            temp_state_ok.nodebatch_delete(vpc=vpc)
        # Success: promote the "ok" state and persist it.
        ctx.obj.state = deepcopy(temp_state_ok)
        ctx.obj.state.dump()
        # --- Processing individual nodes ---------------------------------
        temp_state_ok = deepcopy(ctx.obj.state)
        temp_state_failed = deepcopy(ctx.obj.state)
        node_list = {}
        for obj_kind in [ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
            # Iterate over a copy: entries may be removed while iterating.
            node_list[obj_kind] = delete_target[fabric][obj_kind][:]
            for node in node_list[obj_kind]:
                # Skip nodes whose fabric or the node itself no longer exists.
                if not temp_state_failed.check_fabric(fabric):
                    delete_target[fabric][obj_kind].remove(node)
                    continue
                else:
                    if obj_kind is ObjectKind.ORCHESTRATOR:
                        if not temp_state_failed.check_orchestrator(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                    elif obj_kind is ObjectKind.PROCESSOR:
                        if not temp_state_failed.check_processor(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                    elif obj_kind is ObjectKind.WORKLOAD:
                        if not temp_state_failed.check_workload(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                # Set deleting status in state
                obj = temp_state_failed.get_fabric_object(obj_kind.value, node, fabric)
                temp_state_failed.set_object_state_status(obj, ObjectState.DELETING, ObjectStatus.FAILED)
                # Delete node from state
                res = temp_state_ok.delete_fabric_object(obj_kind.value, node, fabric)
                if not res.status:
                    log_error('Node {0!r} cannot be deleted. {1!s}'.format(node, res.value))
                    sys.exit(1)
        # Check if there is at least one processor left in VPC where workloads are present
        for proc in delete_target[fabric][ObjectKind.PROCESSOR][:]:
            proc_vpc = temp_state_failed.get_fabric_object(ObjectKind.PROCESSOR.value,
                                                           proc, fabric).get('vpc')
            # Fix: use the plural get_fabric_objects(kind, fabric) accessor,
            # consistent with every other listing in this function.
            processors = [x[0] for x in
                          temp_state_ok.get_fabric_objects(ObjectKind.PROCESSOR.value, fabric).items()
                          if proc_vpc == x[1]['vpc']]
            workloads = [x[0] for x in
                         temp_state_ok.get_fabric_objects(ObjectKind.WORKLOAD.value, fabric).items()
                         if proc_vpc == x[1]['vpc']]
            if not bool(processors) and bool(workloads):
                log_error("Cannot delete {0!r}. At least one {1} should left in VPC {2!r} to manage workloads: {3}"
                          .format(proc, ObjectKind.PROCESSOR.value.title(), proc_vpc, workloads))
                sys.exit(1)
        # Actual node delete
        if delete_target[fabric][ObjectKind.ORCHESTRATOR] or delete_target[fabric][ObjectKind.PROCESSOR] or \
                delete_target[fabric][ObjectKind.WORKLOAD]:
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # Compute the node list up front so both failure branches can log
            # it (previously only the plan_generate branch assigned it).
            delete_nodes = delete_target[fabric][ObjectKind.ORCHESTRATOR] + \
                delete_target[fabric][ObjectKind.PROCESSOR] + delete_target[fabric][ObjectKind.WORKLOAD]
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete nodes: {!r}. Exiting'.format(delete_nodes))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete nodes: {!r}. Exiting'.format(delete_nodes))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Success: promote the "ok" state and persist it.
        ctx.obj.state = deepcopy(temp_state_ok)
        ctx.obj.state.dump()
    log_ok('Batch is finished')
    # Generate SSH configuration
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def delete_vpc(ctx, vpc_name, dry_run):
    """Delete a VPC from the current fabric.

    Refuses to delete a VPC that still contains nodes.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param vpc_name: name of the VPC to delete
    :param dry_run: when truthy, only report what would be deleted
    :returns: True on success; exits the process on any failure
    """
    obj_kind = ObjectKind.VPC.value
    obj_name = vpc_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete VPC. Please select fabric first.')
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting VPC {0!r}...'.format(obj_name))
    # Check if VPC exists
    if not ctx.obj.state.check_vpc(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. VPC {0!r} doesn't exist".format(obj_name))
        sys.exit(1)
    # Check if there are nodes in VPC. A node belongs to the VPC when the VPC
    # name appears in the node's 'vpc' field (substring membership test —
    # presumably intentional to match normalised names; verify with callers).
    node_list = []
    for node_kind in [ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value,
                      ObjectKind.WORKLOAD.value]:
        node_list = node_list + [x for x in ctx.obj.state.get_fabric_objects(node_kind).items()
                                 if obj_name in x[1][obj_kind]]
    if node_list:
        log_error("Cannot delete. VPC {0!r} contains nodes, delete them first:".format(obj_name))
        for node in node_list:
            log_error("{0!r}".format(node[0]))
        sys.exit(1)
    # Delete VPC
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.upper(), obj_name))
        return True
    # Get credentials (needed for the terraform run below).
    credentials = Credentials(ctx.obj.state.get_current_fabric(), ctx.obj.state.get(),
                              ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete VPC. Not able to get credentials')
        sys.exit(1)
    # Actual VPC deletion: remove from a scratch copy of the state first so
    # the real state is only replaced after terraform succeeds.
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind.upper(), obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state, credentials,
                          ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete VPC. There is issue with terraform plan generation')
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete VPC. There is issue with terraform plan execution')
        sys.exit(terraform_result[1])
    # Delete VPCs batches
    temp_state.nodebatch_delete(vpc=obj_name)
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.upper(), obj_name))
    return True
def delete_workload(ctx, workload_name, dry_run):
    """Delete workload"""
    kind = ObjectKind.WORKLOAD.value
    # A fabric must be selected before anything can be deleted.
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(kind))
        sys.exit(1)
    # Normalise the requested name according to the naming rules.
    target = ctx.obj.state.normalise_state_obj_name(kind, workload_name)
    log_info('Deleting {0} {1!r}...'.format(kind, target))
    # The workload must exist in the current fabric.
    if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(), target):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(kind.title(), target))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(kind.title(), target))
        return True
    # Cloud credentials are needed for the terraform run below.
    creds = Credentials(ctx.obj.state.get_current_fabric(), ctx.obj.state.get(),
                        ctx.obj.state.config)
    if not creds.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(kind))
        sys.exit(1)
    # Remove the object from a scratch copy of the state first.
    candidate_state = deepcopy(ctx.obj.state)
    outcome = candidate_state.delete_fabric_object(kind, target)
    if not outcome.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(kind, target, outcome.value))
        sys.exit(1)
    # Apply the reduced state through terraform.
    tf = Terraform(candidate_state.get_current_fabric(), candidate_state, creds,
                   ctx.obj.version)
    if not tf.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(kind))
        sys.exit(1)
    tf_outcome = tf.plan_execute()
    if not tf_outcome[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(kind))
        sys.exit(tf_outcome[1])
    # Promote the scratch state to current and persist it.
    ctx.obj.state = deepcopy(candidate_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(kind.title(), target))
    # Refresh the SSH configuration for the remaining nodes.
    if SshConfig(ctx.obj.state).generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def delete_processor(ctx, processor_name, dry_run):
    """Delete a processor node from the current fabric.

    Refuses to delete the last processor in a VPC that still has workloads.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param processor_name: name of the processor to delete
    :param dry_run: when truthy, only report what would be deleted
    :returns: True on success; exits the process on any failure
    """
    obj_kind = ObjectKind.PROCESSOR.value
    obj_name = processor_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_processor(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    # Check if its the only processor in VPC and there are workloads: a VPC
    # with workloads must keep at least one processor to manage them.
    obj_vpc = ctx.obj.state.get_fabric_object(obj_kind, obj_name)['vpc']
    processors = [x[0] for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                  if obj_vpc == x[1]['vpc']]
    workloads = [x[0] for x in ctx.obj.state.get_fabric_objects(ObjectKind.WORKLOAD.value).items()
                 if obj_vpc == x[1]['vpc']]
    if len(processors) < 2 and bool(workloads):
        log_error(
            "Cannot delete {0!r}. At least one {1} should left in VPC {2!r} to manage workloads: {3}".format(
                obj_name, obj_kind.title(), obj_vpc, workloads))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Get credentials (needed for the terraform run below).
    credentials = Credentials(ctx.obj.state.get_current_fabric(), ctx.obj.state.get(),
                              ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion: remove from a scratch copy of the state first so the
    # real state is only replaced after terraform succeeds.
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state, credentials,
                          ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def update_orchestrator(ctx, orchestrator_name, dry_run, all_nodes):
    """Update one orchestrator, or all eligible orchestrators, in the fabric.

    With ``all_nodes`` set, every orchestrator that is neither freshly
    created nor pending deletion is updated; otherwise a single named
    orchestrator is updated after state checks.

    Args:
        ctx: CLI context; fabric state is reached through ``ctx.obj.state``.
        orchestrator_name: Name of the orchestrator to update; mutually
            exclusive with ``all_nodes``.
        dry_run: When truthy, only report which objects would be updated.
        all_nodes: When truthy, target every eligible orchestrator.

    Returns:
        True on success. Exits the process with a non-zero status on any
        validation or playbook failure.
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    obj_list, obj_name = None, None
    # Work on a deep copy; it becomes the live state only at the end.
    temp_state = deepcopy(ctx.obj.state)
    # A fabric must be selected first.
    if not temp_state.get_current_fabric():
        log_error('Cannot update {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    if all_nodes:
        # --all and an explicit name are mutually exclusive.
        if orchestrator_name:
            log_error("Cannot update. Either {}-NAME or option --all should be used".format(
                obj_kind.upper()))
            sys.exit(1)
        objects = temp_state.get_fabric_objects(obj_kind).items()
        obj_all = [x[0] for x in objects]
        obj_created = [x[0] for x in objects
                       if temp_state.check_object_state(x[1], ObjectState.CREATED)]
        obj_deleting = [x[0] for x in objects
                        if temp_state.check_object_state(x[1], ObjectState.DELETING)]
        # Eligible = everything except freshly-created and deleting objects.
        obj_list = [x for x in obj_all if x not in obj_deleting and x not in obj_created]
        if not obj_list:
            states = [ObjectState.CONFIGURED.value, ObjectState.UPDATED.value]
            log_error("Cannot update. There are no {}s in states: {!r}".format(obj_kind, states))
            sys.exit(1)
        if obj_created:
            log_warn('There are {}s in created state: {!r}. Skipping...'.format(
                obj_kind.lower(), obj_created))
        if obj_deleting:
            log_warn('There are {}s in deleting state: {!r}. Skipping...'.format(
                obj_kind.lower(), obj_deleting))
        # NOTE: the original repeated an `if not obj_list:` check here; it was
        # unreachable (the identical check above already exits) and was removed.
    elif orchestrator_name:
        # Normalise the name so it matches the naming rules used in the state.
        obj_name = temp_state.normalise_state_obj_name(obj_kind, orchestrator_name)
        if not temp_state.check_orchestrator(temp_state.get_current_fabric(), obj_name):
            log_error("Cannot update. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # FIX: look up and update the NORMALISED name; the original used the
        # raw orchestrator_name here, diverging from the existence check above.
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            log_error("Cannot proceed, object is not configured")
            sys.exit(1)
        obj_list = [obj_name]
    else:
        # FIX: plain `else` (original tested `orchestrator_name is None`, which
        # let an empty-string name fall through with obj_list left as None).
        log_error("Cannot update. Either {}-NAME or option --all should be used".format(
            obj_kind.upper()))
        sys.exit(1)
    if dry_run:
        log_warn('{0}s {1!r} to be updated (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    # Run the update playbook against the selected orchestrators.
    log_info('{0}s to be updated: {1!r}'.format(obj_kind.title(), obj_list))
    ansible_playbook = "update-orchestrator.yml"
    res = temp_state.obj_update(ansible_playbook, obj_kind, obj_list)
    # Promote the modified copy to be the live state and persist it, even on
    # partial failure, so per-object state changes are not lost.
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if not res[0]:
        sys.exit(res[1])
    return True
def delete_fabric(ctx, fabric_name):
    """Delete a fabric from the state, after checking it is empty.

    Refuses to delete a fabric that still contains VPCs or nodes
    (orchestrators, processors, workloads). On success, removes the
    fabric's files via an Ansible playbook, unsets it as the current
    fabric if needed, deletes it from the state, and drops its batches.

    Args:
        ctx: CLI context; fabric state is reached through ``ctx.obj.state``.
        fabric_name: Name of the fabric to delete (normalised below).

    Returns:
        True on success. Exits the process with a non-zero status on any
        validation or playbook failure.
    """
    # Normalise the name so it matches the naming rules used in the state.
    fabric_name = ctx.obj.state.normalise_state_obj_name('fabric', fabric_name)
    log_info('Deleting fabric {0!r}...'.format(fabric_name))
    # The fabric must exist.
    if not ctx.obj.state.check_fabric(fabric_name):
        log_error("Cannot delete. Fabric {0!r} doesn't exist".format(fabric_name))
        sys.exit(1)
    # Refuse to delete while the fabric still contains VPCs.
    vpc_list = ctx.obj.state.get_fabric_objects('vpc', fabric_name).items()
    if vpc_list:
        log_error("Cannot delete. Fabric {0!r} contains VPCs, delete them first:".format(fabric_name))
        for vpc in vpc_list:
            log_error("{0!r}".format(vpc[0]))
        sys.exit(1)
    # Refuse to delete while the fabric still contains nodes of any kind.
    node_list = []
    for obj_kind in [ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value,
                     ObjectKind.WORKLOAD.value]:
        # list(...) over the objects mapping yields the node NAMES (dict keys).
        node_list = node_list + list(ctx.obj.state.get_fabric_objects(obj_kind, fabric_name))
    if node_list:
        log_error("Cannot delete. Fabric {0!r} contains nodes, delete them first:".format(fabric_name))
        for node in node_list:
            # FIX: node is already the name string; the original printed
            # node[0], i.e. only the first character of each node name.
            log_error("{0!r}".format(node))
        sys.exit(1)
    # Remove the fabric's files with the dedicated playbook (local execution).
    ansible_playbook = "delete-fabric.yml"
    ansible = Ansible(ctx.obj.state, fabric_name, ctx.obj.state.get_ssh_key())
    log_info("Delete {0!r} fabric's files".format(fabric_name))
    ansible_result = ansible.run_playbook(ansible_playbook, local=True)
    if not ansible_result[0]:
        log_error('Cannot delete fabric. There is issue with ansible playbook execution')
        sys.exit(ansible_result[1])
    # If the fabric being deleted is the current one, unset the selection.
    if ctx.obj.state.get_current_fabric() == fabric_name:
        ctx.obj.set_current_fabric(None)
    # Remove the fabric from the state.
    res = ctx.obj.state.delete_fabric(fabric_name)
    if not res.status:
        log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric_name, res.value))
        sys.exit(1)
    # Drop any node batches that belonged to this fabric.
    ctx.obj.state.nodebatch_delete(fabric=fabric_name)
    log_ok('Fabric {0!r} deleted successfully'.format(fabric_name))
    ctx.obj.state.dump()
    return True