def delete_vpc(ctx, vpc_name, dry_run):
    """Delete a VPC from the currently selected fabric.

    Flow: validate preconditions (fabric selected, name normalised, VPC
    exists and contains no nodes), then remove the object from a copy of
    the state, re-run terraform against the reduced state and, on success,
    promote the copy to be the live state and dump it.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        vpc_name: VPC name as given on the command line.
        dry_run: when truthy, stop after validation and only report.

    Returns:
        True on success (including dry-run); exits the process on any error.
    """
    obj_kind = ObjectKind.VPC.value
    obj_name = vpc_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete VPC. Please select fabric first.')
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting VPC {0!r}...'.format(obj_name))
    # Check if VPC exists
    if not ctx.obj.state.check_vpc(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. VPC {0!r} doesn't exist".format(obj_name))
        sys.exit(1)
    # Check if there are nodes in VPC: any orchestrator/processor/workload
    # whose VPC list contains this VPC blocks the deletion.
    node_list = []
    for node_kind in [ObjectKind.ORCHESTRATOR.value, ObjectKind.PROCESSOR.value,
                      ObjectKind.WORKLOAD.value]:
        node_list = node_list + [x for x in ctx.obj.state.get_fabric_objects(node_kind).items()
                                 if obj_name in x[1][obj_kind]]
    if node_list:
        log_error("Cannot delete. VPC {0!r} contains nodes, delete them first:".format(obj_name))
        for node in node_list:
            log_error("{0!r}".format(node[0]))
        sys.exit(1)
    # Delete VPC (dry-run stops here, after all validation)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.upper(), obj_name))
        return True
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete VPC. Not able to get credentials')
        sys.exit(1)
    # Actual VPC deletion is performed on a deep copy so the live state is
    # only replaced once terraform has succeeded.
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind.upper(), obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete VPC. There is issue with terraform plan generation')
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete VPC. There is issue with terraform plan execution')
        sys.exit(terraform_result[1])
    # Delete VPCs batches
    temp_state.nodebatch_delete(vpc=obj_name)
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.upper(), obj_name))
    return True
def parse_file(self, path: str) -> bool:
    """Load credentials from a YAML file into self.credentials.

    Args:
        path: Path to the credentials file ('~' is expanded).

    Returns:
        True when the file was read and parsed; False when it does not
        exist. Other I/O or YAML errors propagate to the caller.
    """
    log_info("Reading credentials file: {!r}".format(path))
    try:
        # 'with' closes the file on exit; the explicit close() the original
        # carried inside the context manager was redundant and was removed.
        with open(os.path.expanduser(path), 'r') as cred_f:
            self.credentials = yaml.safe_load(cred_f)
    except FileNotFoundError:
        log_warn("No credentials file found: {!r}".format(path))
        return False
    return True
def delete_workload(ctx, workload_name, dry_run):
    """Delete a workload from the currently selected fabric.

    Flow: validate (fabric selected, name normalised, workload exists),
    delete the object from a copy of the state, re-run terraform against
    the reduced state, then promote the copy to be the live state and
    regenerate the SSH configuration.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        workload_name: workload name as given on the command line.
        dry_run: when truthy, stop after validation and only report.

    Returns:
        True on success (including dry-run); exits the process on any error.
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_name = workload_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion happens on a deep copy; the live state is replaced
    # only after terraform succeeds.
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration (best-effort: failure only warns)
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def get_env_gcp(self) -> bool:
    """Read GCP credential parameters from environment variables.

    Walks self.params_gcp; every variable that is set is stored via
    set_gcp_param(). Returns True only when all variables were present.
    """
    missing = False
    log_info("Checking environment variables: {}".format(
        [entry['env'] for entry in self.params_gcp]))
    for entry in self.params_gcp:
        value: str = os.getenv(entry['env'], '')
        if value:
            self.set_gcp_param(entry['key'], value)
        else:
            log_warn("{!r} is not set".format(entry['env']))
            missing = True
    if missing:
        return False
    log_info("GCP credentials - OK")
    return True
def check_azr(self) -> bool:
    """Verify that every AZR credential parameter is available.

    Returns True when the 'azr' section exists and all keys from
    self.params_azr resolve; otherwise warns per missing item and
    returns False.
    """
    missing = False
    section: Result = self.get_cloud('azr')
    if not section.status:
        log_warn("'azr' section is missing")
        missing = True
    else:
        for entry in self.params_azr:
            lookup: Result = self.get_azr_param(entry['key'])
            if not lookup.status:
                log_warn("{!r} is missing".format(entry['key']))
                missing = True
    if missing:
        return False
    log_info("AZR credentials - OK")
    return True
def get_output_variable_with_retries(self, module, variable, retries):
    """Get an output variable from the terraform state, with retries.

    Tries get_output_variable() once; while it returns None, runs a
    targeted 'terraform refresh' (scoped to the module's Azure VM
    resource) and retries, up to `retries` times.

    Args:
        module: terraform module name the variable belongs to.
        variable: output variable name to read.
        retries: maximum number of refresh-and-retry attempts.

    Returns:
        The output value, or None if still missing after all retries.
    """
    terraform_refresh = [
        "refresh", "-input=false",
        "-var", "gcp_credentials=" + self.gcp_credentials,
        "-var", "gcp_project_name=" + self.gcp_project_name,
        "-var", "azr_client_id=" + self.azr_client_id,
        "-var", "azr_client_secret=" + self.azr_client_secret,
        "-var", "azr_resource_group_name=" + self.azr_resource_group_name,
        "-var", "azr_subscription_id=" + self.azr_subscription_id,
        # NOTE: 'azr_tennant_id' is misspelled but must match the variable
        # name declared on the terraform side, so it is kept as-is.
        "-var", "azr_tennant_id=" + self.azr_tennant_id,
        "-var", "image_tag=" + self.image_tag,
        "-var", "image_version=" + self.image_family.replace(".", "-"),
        "-var", "dns_managed_zone_domain=" + self.hosted_zone,
        "-target=module." + module + ".azurerm_virtual_machine.vm"
    ]
    # Check if there AWS keys or EC2 role should be used
    if not self.aws_ec2_role:
        terraform_refresh += [
            "-var", "aws_secret_key={}".format(self.s3_secret_key),
            "-var", "aws_access_key={}".format(self.s3_access_key_id),
        ]
    log_info("Getting {0!r} output for {1!r} from terraform state...".format(variable, module))
    var = self.get_output_variable(module, variable)
    i = 1
    cwd = os.getcwd()
    os.chdir(self.terraform_dir)
    try:
        while var is None and i <= retries:
            # Fixed typo in log message: 'terraforn' -> 'terraform'
            log_warn('Retry to get {0!r} output from terraform state. ({1} of {2})'.format(variable, i, retries))
            log_info('Refresh terraform module...')
            try:
                if not self.config.get_debug():
                    sh.terraform(terraform_refresh)
                else:
                    # Debug mode streams terraform output through our handler
                    cmd = sh.terraform(terraform_refresh,
                                       _out=self.terraform_process_output, _bg=True)
                    cmd.wait()
            except sh.ErrorReturnCode as err:
                # Refresh failures are logged but not fatal; we still retry
                log_info(err.full_cmd)
                log_info('Command output:' + err.stdout.decode('UTF-8').rstrip())
                log_error(err.stderr.decode('UTF-8').rstrip(), nl=False)
                log_error("Unexpected terraform error during refresh (status code {0!r})".format(err.exit_code))
            var = self.get_output_variable(module, variable)
            i = i + 1
    finally:
        # Fixed: restore the working directory even if an unexpected
        # exception escapes the retry loop.
        os.chdir(cwd)
    return var
def set_fabric(ctx, fabric_name):
    """Make the named fabric the active one.

    A fabric marked DELETING can still be activated (with a warning);
    otherwise it must be CONFIGURED with SUCCESS status. Exits the
    process when the fabric is missing or not ready.
    """
    # Check fabric exists
    if not ctx.obj.state.check_fabric(fabric_name):
        log_error("Fabric {!r} doesn't exist".format(fabric_name))
        sys.exit(1)
    fabric = ctx.obj.state.get_fabric(fabric_name)

    def _activate():
        # Shared tail: mark active, report, succeed.
        ctx.obj.set_current_fabric(fabric_name)
        log_ok('Active fabric: {0!r}'.format(fabric_name))
        return True

    # A fabric scheduled for deletion may still be selected
    if ctx.obj.state.check_object_state(fabric, ObjectState.DELETING):
        log_warn("Proceeding, but object is set for deletion!")
        return _activate()
    configured = ctx.obj.state.check_object_state(fabric, ObjectState.CONFIGURED)
    successful = ctx.obj.state.check_object_status(fabric, ObjectStatus.SUCCESS)
    if not (configured and successful):
        log_error("Cannot activate, configure fabric {0!r} first".format(fabric_name))
        sys.exit(1)
    return _activate()
def check_gcp(self) -> bool:
    """Check that GCP credentials are provided.

    Validates the 'gcp' section: the keyfile parameter must resolve to an
    existing JSON file (loaded into self.credentials['gcp']) and the
    project-id parameter must be present.

    Returns:
        True when everything is present and readable, False otherwise.
    """
    credentials_missing: bool = False
    # Check 'google_cloud_keyfile_json'
    get_cloud: Result = self.get_cloud('gcp')
    if get_cloud.status:
        param: Dict = self.params_gcp[0]
        get_param: Result = self.get_gcp_param(param['key'])
        if not get_param.status:
            log_warn("{!r} is missing".format(param['key']))
            credentials_missing = True
        else:
            try:
                # 'with' closes the file; the explicit close() the original
                # carried inside the context manager was redundant.
                with open(os.path.expanduser(get_param.value), 'r') as cred_f:
                    self.credentials['gcp'] = json.load(cred_f)
                    self.set_gcp_param(param['key'], get_param.value)
            except FileNotFoundError:
                log_info("No GCP credentials file found: {!r}".format(
                    get_param.value))
                credentials_missing = True
        # Check 'project_id'
        param = self.params_gcp[1]
        get_param = self.get_gcp_param(param['key'])
        if not get_param.status:
            log_warn("{!r} is missing".format(param['key']))
            credentials_missing = True
    else:
        log_warn("'gcp' section is missing")
        credentials_missing = True
    if credentials_missing:
        return False
    log_info("GCP credentials - OK")
    return True
def check_ssh(path: str) -> bool:
    """Verify that both halves of an SSH key pair exist on disk.

    Args:
        path: path to the private key ('~' is expanded); the public part
            is expected next to it with a '.pub' suffix.

    Returns:
        True when both files exist, False otherwise.
    """
    private_file_name: str = os.path.expanduser(path)
    public_file_name: str = private_file_name + '.pub'
    log_info("Checking SSH key files:")
    private_missing = not os.path.isfile(private_file_name)
    if private_missing:
        log_warn("Private key file is not found: {!r}".format(
            private_file_name))
    else:
        log_info("Private key file exists - OK")
    public_missing = not os.path.isfile(public_file_name)
    if public_missing:
        log_warn(
            "Public key file is not found: {!r}".format(public_file_name))
    else:
        log_info("Public key file exists - OK")
    if private_missing or public_missing:
        log_info(
            "NOTE: If private SSH key is provided as fabric configuration, "
            "the public part is also expected to exist")
        return False
    return True
def check_aws(self) -> bool:
    """Verify that AWS credentials are available.

    When an EC2 role is configured no explicit keys are required;
    otherwise every key in self.params_aws must resolve.
    """
    section: Result = self.get_cloud('aws')
    if not section.status:
        log_warn("'aws' section is missing")
        return False
    role: Result = self.get_aws_param('aws_ec2_role')
    if role.status and role.value:
        # EC2 role supplies credentials implicitly — nothing more to check
        log_info("AWS credentials - OK (EC2 role is configured)")
        return True
    log_warn("{!r} is set to 'false'".format('aws_ec2_role'))
    missing = False
    for entry in self.params_aws:
        if not self.get_aws_param(entry['key']).status:
            log_warn("{!r} is missing".format(entry['key']))
            missing = True
    if missing:
        return False
    log_info("AWS credentials - OK")
    return True
def delete_orchestrator(ctx, orchestrator_name, dry_run):
    """Delete an orchestrator from the currently selected fabric.

    Flow: validate (fabric selected, name normalised, object exists);
    when the node has been provisioned (state != CREATED) run the
    matching ansible delete playbook (coordinating with the swarm
    manager); then remove the object from a copy of the state, re-run
    terraform and promote the copy to be the live state.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        orchestrator_name: orchestrator name as given on the command line.
        dry_run: when truthy, stop after validation and only report.

    Returns:
        True on success (including dry-run); exits the process on any error.
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    obj_name = orchestrator_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_orchestrator(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Check if configured: only provisioned nodes need the ansible teardown
    obj = ctx.obj.state.get_fabric_object(obj_kind, obj_name)
    if not ctx.obj.state.check_object_state(obj, ObjectState.CREATED):
        # Delete orchestrator via the type-specific ansible playbook
        ansible_vars = []
        ansible_nodes = [obj_name]
        ansible_playbook = "delete-" + obj['type'] + ".yml"
        # Get swarm manager
        managers = [x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                    if 'manager' in x[1]['role']]
        workers = [x for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                   if 'worker' in x[1]['role']]
        if not managers:
            log_warn('There is no swarm manager found. Nothing should be done with ansible')
        else:
            # Check if manager and there are workers: a manager cannot be
            # removed while workers still depend on it.
            if obj['role'] == 'manager' and workers:
                log_error(
                    '{0} {1!r} is swarm manager. Remove workers first:'.format(obj_kind.title(), orchestrator_name))
                for node in workers:
                    log_error("{0!r}".format(node[0]))
                sys.exit(1)
            # The playbook also needs the (first) manager host
            ansible_vars = ansible_vars + [('env_swarm_manager_host', managers[0][0])]
            ansible_nodes = ansible_nodes + [managers[0][0]]
            ansible = Ansible(ctx.obj.state, ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get_ssh_key())
            if ansible.inventory_generate(ansible_nodes, node_vars=ansible_vars):
                ansible_result = ansible.run_playbook(ansible_playbook)
                if not ansible_result[0]:
                    log_error('Cannot delete {0}. There is issue with ansible playbook execution'.format(obj_kind))
                    sys.exit(ansible_result[1])
            else:
                sys.exit(1)
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion on a deep copy; live state replaced only on success
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration (best-effort: failure only warns)
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def start_workload(ctx, workload_name, dry_run, all_nodes):
    """Start one workload or all eligible workloads.

    Either WORKLOAD-NAME or --all must be given (not both). With --all,
    every workload that is neither DELETING nor merely CREATED is a
    target. With a name, the workload must exist, not be DELETING, and
    be in CONFIGURED or STOPPED state (already STARTED+SUCCESS is a
    no-op).

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        workload_name: workload name, or None when --all is used.
        dry_run: when truthy, stop after validation and only report.
        all_nodes: --all flag.

    Returns:
        True on success (including dry-run); exits the process on error.
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_list, obj_name = None, None
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error(
            'Cannot start {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check target objects to be used
    if all_nodes:
        if workload_name:
            log_error(
                "Cannot start. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        obj_list = [
            x[0] for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
            if not ctx.obj.state.check_object_state(x[1], ObjectState.DELETING)
            and not ctx.obj.state.check_object_state(x[1], ObjectState.CREATED)
        ]
        if not obj_list:
            log_error(
                "Cannot start. There are no configured {}s".format(obj_kind))
            sys.exit(1)
    elif workload_name:
        # Check if naming matches rules
        obj_name = ctx.obj.state.normalise_state_obj_name(
            obj_kind, workload_name)
        # Check if exist
        if not ctx.obj.state.check_workload(ctx.obj.state.get_current_fabric(),
                                            obj_name):
            log_error("Cannot start. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = ctx.obj.state.get_fabric_object(obj_kind, obj_name)
        if ctx.obj.state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if ctx.obj.state.check_object_state(obj, ObjectState.STARTED) and \
                ctx.obj.state.check_object_status(obj, ObjectStatus.SUCCESS):
            log_warn("{0} {1!r} is already started".format(
                obj_kind.title(), obj_name))
            return True
        if not ctx.obj.state.check_object_state(
                obj, [ObjectState.CONFIGURED, ObjectState.STOPPED]):
            log_error("Cannot start. {0} {1!r} should be configured".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Fixed: use the normalised name — all checks above were done with
        # obj_name, so the raw CLI name could diverge from the state key.
        obj_list = [obj_name]
    elif workload_name is None:
        log_error(
            "Cannot start. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Start dry-run
    if dry_run:
        # Fixed: report the resolved target list; obj_name is None when
        # --all is used, which previously printed 'Workload None ...'.
        log_warn('{0}s to be started: {1!r} (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    log_info('{0}s to be started: {1!r}'.format(obj_kind.title(), obj_list))
    # Start action on a deep copy of the state
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.workload_start(temp_state.get_current_fabric(), obj_list)
    # Set temp state to current and dump it (even on failure, so partial
    # progress is persisted before exiting with the action's status code)
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if not res[0]:
        sys.exit(res[1])
    return True
def update_workload(ctx, workload_name, dry_run, all_nodes):
    """Update one workload or all eligible workloads.

    Either WORKLOAD-NAME or --all must be given (not both). With --all,
    workloads in CREATED or DELETING state are skipped with a warning.
    The resulting target list is expanded into per-action lists
    (update/config/stop/start) which are executed in that order, with
    the state persisted after each stage.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        workload_name: workload name, or None when --all is used.
        dry_run: when truthy, stop after validation and only report.
        all_nodes: --all flag.

    Returns:
        True on success (including dry-run); exits the process on error.
    """
    obj_kind = ObjectKind.WORKLOAD.value
    obj_list, obj_name = None, None
    temp_state = deepcopy(ctx.obj.state)
    # Check if fabric is set
    if not temp_state.get_current_fabric():
        log_error(
            'Cannot update {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check target objects to be used
    if all_nodes:
        if workload_name:
            log_error(
                "Cannot update. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
        ]
        obj_created = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
        ]
        obj_deleting = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.DELETING)
        ]
        # Everything not created-only and not being deleted is updatable
        obj_list = [
            x for x in obj_all
            if x not in obj_deleting and x not in obj_created
        ]
        if not obj_list:
            states = [
                ObjectState.CONFIGURED.value, ObjectState.UPDATED.value,
                ObjectState.STARTED.value, ObjectState.STOPPED.value
            ]
            log_error("Cannot update. There are no {}s in states: {!r}".format(
                obj_kind, states))
            sys.exit(1)
        if obj_created:
            log_warn(
                'There are {}s in created state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_created))
        if obj_deleting:
            log_warn(
                'There are {}s in deleting state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_deleting))
    elif workload_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind, workload_name)
        # Check if exist
        if not temp_state.check_workload(temp_state.get_current_fabric(),
                                         obj_name):
            log_error("Cannot update. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            log_error("Cannot proceed, object is not configured")
            sys.exit(1)
        # NOTE(review): the raw CLI name is used here while the checks above
        # used the normalised obj_name — confirm these never differ.
        obj_list = [workload_name]
    elif workload_name is None:
        log_error(
            "Cannot update. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Update dry-run
    if dry_run:
        log_warn('{0}s to be updated: {1!r} (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    log_info('{0}s to be updated: {1!r}'.format(obj_kind.title(), obj_list))
    # Get action list: targets are partitioned into per-action buckets.
    # Each stage below persists the state before a failure exit, and
    # re-copies the live state before the next stage.
    actions_list = temp_state.update_actions_list(obj_kind, obj_list)
    if actions_list['update']:
        ansible_playbook = "update-workload.yml"
        res = temp_state.obj_update(ansible_playbook, obj_kind,
                                    actions_list['update'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['config']:
        res = temp_state.workload_configure(temp_state.get_current_fabric(),
                                            actions_list['config'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['stop']:
        res = temp_state.workload_stop(temp_state.get_current_fabric(),
                                       actions_list['stop'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
        temp_state = deepcopy(ctx.obj.state)
    if actions_list['start']:
        res = temp_state.workload_start(temp_state.get_current_fabric(),
                                        actions_list['start'])
        ctx.obj.state = deepcopy(temp_state)
        ctx.obj.state.dump()
        if not res[0]:
            sys.exit(res[1])
    return True
def update_orchestrator(ctx, orchestrator_name, dry_run, all_nodes):
    """Update one orchestrator or all eligible orchestrators.

    Either ORCHESTRATOR-NAME or --all must be given (not both). With
    --all, orchestrators in CREATED or DELETING state are skipped with a
    warning. The targets are then updated via the
    'update-orchestrator.yml' ansible playbook.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        orchestrator_name: orchestrator name, or None when --all is used.
        dry_run: when truthy, stop after validation and only report.
        all_nodes: --all flag.

    Returns:
        True on success (including dry-run); exits the process on error.
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    obj_list, obj_name = None, None
    temp_state = deepcopy(ctx.obj.state)
    # Check if fabric is set
    if not temp_state.get_current_fabric():
        log_error(
            'Cannot update {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check target objects to be used
    if all_nodes:
        if orchestrator_name:
            log_error(
                "Cannot update. Either {}-NAME or option --all should be used".
                format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
        ]
        obj_created = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.CREATED)
        ]
        obj_deleting = [
            x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
            if temp_state.check_object_state(x[1], ObjectState.DELETING)
        ]
        obj_list = [
            x for x in obj_all
            if x not in obj_deleting and x not in obj_created
        ]
        if not obj_list:
            states = [ObjectState.CONFIGURED.value, ObjectState.UPDATED.value]
            log_error("Cannot update. There are no {}s in states: {!r}".format(
                obj_kind, states))
            sys.exit(1)
        if obj_created:
            log_warn(
                'There are {}s in created state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_created))
        if obj_deleting:
            log_warn(
                'There are {}s in deleting state: {!r}. Skipping...'.format(
                    obj_kind.lower(), obj_deleting))
        # Fixed: removed a second, unreachable 'if not obj_list' guard that
        # duplicated the check above (obj_list cannot become empty between
        # the two checks — the first one already exits).
    elif orchestrator_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind,
                                                       orchestrator_name)
        # Check if exist
        if not temp_state.check_orchestrator(temp_state.get_current_fabric(),
                                             obj_name):
            log_error("Cannot update. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state — fixed: look up by the normalised obj_name; the raw
        # CLI name may differ from the state key that check_orchestrator
        # validated above.
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            log_error("Cannot proceed, object is not configured")
            sys.exit(1)
        # Fixed: target the normalised name for the same reason.
        obj_list = [obj_name]
    elif orchestrator_name is None:
        log_error(
            "Cannot update. Either {}-NAME or option --all should be used".
            format(obj_kind.upper()))
        sys.exit(1)
    # Update dry-run
    if dry_run:
        log_warn('{0}s {1!r} to be updated (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    # Update orchestrator
    log_info('{0}s to be updated: {1!r}'.format(obj_kind.title(), obj_list))
    ansible_playbook = "update-orchestrator.yml"
    res = temp_state.obj_update(ansible_playbook, obj_kind, obj_list)
    # Set temp state to current and dump it (even on failure, so partial
    # progress is persisted before exiting with the action's status code)
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if not res[0]:
        sys.exit(res[1])
    return True
def delete_processor(ctx, processor_name, dry_run):
    """Delete a processor from the currently selected fabric.

    Flow: validate (fabric selected, name normalised, processor exists,
    it is not the last processor in a VPC that still has workloads),
    delete the object from a copy of the state, re-run terraform, then
    promote the copy to be the live state and regenerate SSH config.

    Args:
        ctx: CLI context; ctx.obj.state holds the fabric state object.
        processor_name: processor name as given on the command line.
        dry_run: when truthy, stop after validation and only report.

    Returns:
        True on success (including dry-run); exits the process on error.
    """
    obj_kind = ObjectKind.PROCESSOR.value
    obj_name = processor_name
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot delete {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Check if naming matches rules
    obj_name = ctx.obj.state.normalise_state_obj_name(obj_kind, obj_name)
    log_info('Deleting {0} {1!r}...'.format(obj_kind, obj_name))
    # Check if exists
    if not ctx.obj.state.check_processor(ctx.obj.state.get_current_fabric(), obj_name):
        log_error("Cannot delete. {0} {1!r} doesn't exist".format(obj_kind.title(), obj_name))
        sys.exit(1)
    # Check if it's the only processor in the VPC while workloads remain:
    # at least one processor must stay to manage them.
    obj_vpc = ctx.obj.state.get_fabric_object(obj_kind, obj_name)['vpc']
    processors = [x[0] for x in ctx.obj.state.get_fabric_objects(obj_kind).items()
                  if obj_vpc == x[1]['vpc']]
    workloads = [x[0] for x in ctx.obj.state.get_fabric_objects(ObjectKind.WORKLOAD.value).items()
                 if obj_vpc == x[1]['vpc']]
    if len(processors) < 2 and bool(workloads):
        log_error(
            "Cannot delete {0!r}. At least one {1} should left in VPC {2!r} to manage workloads: {3}".format(
                obj_name, obj_kind.title(), obj_vpc, workloads))
        sys.exit(1)
    if dry_run:
        log_warn('{0} {1!r} to be deleted (started with --dry-run)'.format(obj_kind.title(), obj_name))
        return True
    # Get credentials
    credentials = Credentials(ctx.obj.state.get_current_fabric(),
                              ctx.obj.state.get(), ctx.obj.state.config)
    if not credentials.get():
        log_error('Cannot delete {0}. Not able to get credentials'.format(obj_kind))
        sys.exit(1)
    # Actual deletion on a deep copy; live state replaced only on success
    temp_state = deepcopy(ctx.obj.state)
    res = temp_state.delete_fabric_object(obj_kind, obj_name)
    if not res.status:
        log_error('{0} {1!r} cannot be deleted. {2!s}'.format(obj_kind, obj_name, res.value))
        sys.exit(1)
    # Run Terraform with new state
    terraform = Terraform(temp_state.get_current_fabric(), temp_state,
                          credentials, ctx.obj.version)
    if not terraform.plan_generate():
        log_error('Cannot delete {0}. There is issue with terraform plan generation'.format(obj_kind))
        sys.exit(1)
    terraform_result = terraform.plan_execute()
    if not terraform_result[0]:
        log_error('Cannot delete {0}. There is issue with terraform plan execution'.format(obj_kind))
        sys.exit(terraform_result[1])
    # Set new state to be current and dump it
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    log_ok('{0} {1!r} deleted successfully'.format(obj_kind.title(), obj_name))
    # Generate SSH configuration (best-effort: failure only warns)
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def delete_batch(ctx, filename, input_format, dry_run, yes):
    """Delete a batch of objects described by a YAML spec file.

    Loads and parses the batch spec, groups the target objects per
    fabric, asks for confirmation (unless ``yes``), then deletes per
    fabric in dependency order: whole fabrics (nested nodes first, then
    VPCs, then the fabric's local files via ansible), then VPCs (nested
    nodes first), then individual nodes.  Each stage mutates a
    ``temp_state_ok`` copy and commits it only on success; on failure the
    ``temp_state_failed`` copy (objects marked DELETING/FAILED) is
    committed instead and the process exits non-zero.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param filename: path to the batch spec file
    :param input_format: spec format; only 'yaml' is supported
    :param dry_run: forwarded from CLI (currently only logged)
    :param yes: if truthy, skip the interactive confirmation
    :return: True on success (or when the user declines confirmation)
    """
    log_info('Deleting batch: file={!r}, input-format={!r}, dry-run={!r}'.format(
        filename, input_format, dry_run))
    # Only YAML currently
    if input_format != 'yaml':
        log_warn('Only YAML supported')
    # Safely load YAML
    # noinspection PyBroadException
    try:
        with open(filename, 'r') as config_f:
            try:
                batch = yaml.safe_load(config_f)
            except yaml.YAMLError as err:
                log_error("Error while loading YAML: {0!r}".format(err))
                if hasattr(err, 'problem_mark'):
                    mark = err.problem_mark
                    log_error("Error position: ({}:{})".format(mark.line + 1, mark.column + 1))
                sys.exit(1)
            # NOTE: the 'with' block closes the file; no explicit close needed.
    except IOError as err:
        log_error(err)
        sys.exit(1)
    except Exception:
        # handle other exceptions such as attribute errors
        print("Unexpected error:", sys.exc_info()[0])
        sys.exit(1)
    # Parse batch
    try:
        batch = BatchSpec(batch, ctx.obj.state.api_version)
    except TypeError as err:
        log_error("Cannot parse batch: {0!r}".format(err))
        sys.exit(1)
    # Get objects to be deleted, grouped by fabric name
    delete_target = {}
    for obj_kind in [ObjectKind.FABRIC, ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                     ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
        for obj in batch.get_attr_list(obj_kind):
            # A fabric names itself; every other kind references its fabric.
            if obj_kind == ObjectKind.FABRIC:
                fabric_name = obj['metadata']['name']
            else:
                fabric_name = obj['metadata']['fabric']
            if fabric_name not in delete_target:
                delete_target[fabric_name] = {ObjectKind.FABRIC: [], ObjectKind.VPC: [],
                                              ObjectKind.WORKLOAD: [],
                                              ObjectKind.ORCHESTRATOR: [],
                                              ObjectKind.PROCESSOR: []}
            delete_target[fabric_name][obj_kind].append(batch.get_attr_name(obj))
    # Show the user what is about to be deleted
    for fabric in delete_target:
        for obj_kind in [ObjectKind.FABRIC, ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                         ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
            if obj_kind in delete_target[fabric]:
                if delete_target[fabric][obj_kind]:
                    log_info('{0}: {1!r}'.format(obj_kind.value.title(),
                                                 delete_target[fabric][obj_kind]))

    def confirm_yes_no(message, default="no"):
        """Requires confirmation with yes/no"""
        valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
        prompt = " [y/N] "
        while True:
            log_info(message + prompt)
            choice = input().lower()
            if choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                log_info("Please respond with 'yes' or 'no'")

    if not yes:
        if not confirm_yes_no('Do you want to delete these objects?'):
            log_info('Exiting...')
            return True
    # For all fabrics in delete target
    for fabric in delete_target:
        # ---- Processing fabrics -------------------------------------------
        if fabric in delete_target[fabric][ObjectKind.FABRIC]:
            temp_state_ok = deepcopy(ctx.obj.state)
            temp_state_failed = deepcopy(ctx.obj.state)
            if temp_state_failed.check_fabric(fabric):
                log_warn('Fabric {0!r} is going to be deleted with all nested objects'.format(fabric))
                # Set deleting status in state
                temp_state_failed.set_object_state_status(temp_state_failed.get_fabric(fabric),
                                                          ObjectState.DELETING,
                                                          ObjectStatus.FAILED)
                # Get nested objects (one list per kind, same order as below)
                nested = []
                for obj_kind in [ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                                 ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
                    nested.append([obj[0] for obj in
                                   temp_state_failed.get_fabric_objects(obj_kind.value, fabric).items()])
                # Set nested state to deleting; drop nodes (not VPCs) from the
                # OK state so Terraform removes them first.
                for index, obj_kind in enumerate([ObjectKind.VPC, ObjectKind.ORCHESTRATOR,
                                                  ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]):
                    for obj_name in nested[index]:
                        obj = temp_state_failed.get_fabric_object(obj_kind.value, obj_name, fabric)
                        temp_state_failed.set_object_state_status(obj, ObjectState.DELETING,
                                                                  ObjectStatus.FAILED)
                        if obj_kind != ObjectKind.VPC:
                            res = temp_state_ok.delete_fabric_object(obj_kind.value, obj_name, fabric)
                            if not res.status:
                                log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric, res.value))
                                sys.exit(1)
                # Try to delete nodes-only nested objects
                nested_nodes = nested[1] + nested[2] + nested[3]
                if nested_nodes:
                    log_info("Deleting nested nodes first")
                    # Get credentials
                    credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
                    if not credentials.get():
                        log_error('Batch failed. Not able to get credentials')
                        sys.exit(1)
                    # Run terraform
                    terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
                    if not terraform.plan_generate():
                        log_error('Batch failed to delete nested nodes: {!r}. Exiting'.format(nested_nodes))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(1)
                    terraform_result = terraform.plan_execute()
                    if not terraform_result[0]:
                        log_error('Batch failed to delete nested nodes: {!r}. Exiting'.format(nested_nodes))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(terraform_result[1])
                # If this is current fabric, unset it
                # NOTE(review): other paths operate on ctx.obj.state; confirm
                # set_current_fabric really lives on ctx.obj and not the state.
                if temp_state_failed.get_current_fabric() == fabric:
                    ctx.obj.set_current_fabric(None)
                # Delete fabric from state
                res = temp_state_ok.delete_fabric(fabric)
                if not res.status:
                    log_error('Fabric {0!r} cannot be deleted. {1!s}'.format(fabric, res.value))
                    sys.exit(1)
                # Try to delete nested VPCs
                if nested[0]:
                    log_info("Deleting nested VPCs")
                    # Get credentials
                    credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
                    if not credentials.get():
                        log_error('Batch failed. Not able to get credentials')
                        sys.exit(1)
                    # Run terraform
                    terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
                    if not terraform.plan_generate():
                        log_error('Batch failed to delete fabric: {!r}. Exiting'.format(fabric))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(1)
                    terraform_result = terraform.plan_execute()
                    if not terraform_result[0]:
                        log_error('Batch failed to delete fabric: {!r}. Exiting'.format(fabric))
                        ctx.obj.state = deepcopy(temp_state_failed)
                        ctx.obj.state.dump()
                        sys.exit(terraform_result[1])
                # Run ansible playbook to remove the fabric's local files
                ansible_playbook = "delete-fabric.yml"
                ansible = Ansible(temp_state_failed, fabric, temp_state_failed.get_ssh_key())
                log_info("Delete {0!r} fabric's files".format(fabric))
                ansible_result = ansible.run_playbook(ansible_playbook, local=True)
                if not ansible_result[0]:
                    log_error('Cannot delete fabric. There is issue with ansible playbook execution')
                    ctx.obj.state = deepcopy(temp_state_failed)
                    # FIX: was ctx.obj.dump(); the dump() method is on the state.
                    ctx.obj.state.dump()
                    sys.exit(ansible_result[1])
                # Delete fabrics batches
                temp_state_ok.nodebatch_delete(fabric=fabric)
                # Success
                ctx.obj.state = deepcopy(temp_state_ok)
                ctx.obj.state.dump()
        # ---- Processing VPCs ----------------------------------------------
        nested = {}
        temp_state_ok = deepcopy(ctx.obj.state)
        temp_state_failed = deepcopy(ctx.obj.state)
        # Iterate a copy so we can remove vanished objects from the target.
        vpc_list = delete_target[fabric][ObjectKind.VPC][:]
        for vpc in vpc_list:
            # Check if objects are exist
            if not temp_state_failed.check_fabric(fabric):
                delete_target[fabric][ObjectKind.VPC].remove(vpc)
                continue
            elif not temp_state_failed.check_vpc(fabric, vpc):
                delete_target[fabric][ObjectKind.VPC].remove(vpc)
                continue
            log_warn('VPC {0!r} is going to be deleted with all nested objects'.format(vpc))
            # Set deleting status in state
            obj = temp_state_failed.get_fabric_object(ObjectKind.VPC.value, vpc, fabric)
            temp_state_failed.set_object_state_status(obj, ObjectState.DELETING, ObjectStatus.FAILED)
            # Get nested objects
            nested[vpc] = []
            for obj_kind in [ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
                nested[vpc].append([obj[0] for obj in
                                    temp_state_failed.get_fabric_objects(obj_kind.value, fabric).items()
                                    if vpc in obj[1][ObjectKind.VPC.value]])
            # Set nested state to deleting and drop them from the OK state
            for index, obj_kind in enumerate([ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR,
                                              ObjectKind.WORKLOAD]):
                for obj_name in nested[vpc][index]:
                    obj = temp_state_failed.get_fabric_object(obj_kind.value, obj_name, fabric)
                    temp_state_failed.set_object_state_status(obj, ObjectState.DELETING,
                                                              ObjectStatus.FAILED)
                    res = temp_state_ok.delete_fabric_object(obj_kind.value, obj_name, fabric)
                    if not res.status:
                        log_error('Nested object {0!r} cannot be deleted. {1!s}'.format(obj_name, res.value))
                        sys.exit(1)
        # Try to delete nodes-only nested objects
        delete_nodes = False
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            if nested[vpc][0] or nested[vpc][1] or nested[vpc][2]:
                delete_nodes = True
        if delete_nodes:
            log_info("Deleting nested nodes first")
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete nodes, that nested to VPCs: {!r}. Exiting'.
                          format(delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete nodes, that nested to VPCs: {!r}. Exiting'.
                          format(delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Delete VPC from state
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            res = temp_state_ok.delete_fabric_object(ObjectKind.VPC.value, vpc, fabric)
            if not res.status:
                log_error('VPC {0!r} cannot be deleted. {1!s}'.format(vpc, res.value))
                sys.exit(1)
        # Actual VPC delete
        if delete_target[fabric][ObjectKind.VPC]:
            log_info("Deleting VPCs")
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete VPCs: {!r}. Exiting'.format(
                    delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete VPCs: {!r}. Exiting'.format(
                    delete_target[fabric][ObjectKind.VPC]))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Delete VPCs batches
        for vpc in delete_target[fabric][ObjectKind.VPC]:
            temp_state_ok.nodebatch_delete(vpc=vpc)
        # Success
        ctx.obj.state = deepcopy(temp_state_ok)
        ctx.obj.state.dump()
        # ---- Processing nodes ---------------------------------------------
        temp_state_ok = deepcopy(ctx.obj.state)
        temp_state_failed = deepcopy(ctx.obj.state)
        node_list = {}
        # For all nodes in delete target
        for obj_kind in [ObjectKind.ORCHESTRATOR, ObjectKind.PROCESSOR, ObjectKind.WORKLOAD]:
            node_list[obj_kind] = delete_target[fabric][obj_kind][:]
            for node in node_list[obj_kind]:
                # Check if objects are exist; drop vanished ones from the target
                if not temp_state_failed.check_fabric(fabric):
                    delete_target[fabric][obj_kind].remove(node)
                    continue
                else:
                    if obj_kind is ObjectKind.ORCHESTRATOR:
                        if not temp_state_failed.check_orchestrator(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                    elif obj_kind is ObjectKind.PROCESSOR:
                        if not temp_state_failed.check_processor(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                    elif obj_kind is ObjectKind.WORKLOAD:
                        if not temp_state_failed.check_workload(fabric, node):
                            delete_target[fabric][obj_kind].remove(node)
                            continue
                # Set deleting status in state
                obj = temp_state_failed.get_fabric_object(obj_kind.value, node, fabric)
                temp_state_failed.set_object_state_status(obj, ObjectState.DELETING,
                                                          ObjectStatus.FAILED)
                # Delete node from state
                res = temp_state_ok.delete_fabric_object(obj_kind.value, node, fabric)
                if not res.status:
                    log_error('Node {0!r} cannot be deleted. {1!s}'.format(node, res.value))
                    sys.exit(1)
        # Check if there is at least one processor left in VPC where workloads are present
        for proc in delete_target[fabric][ObjectKind.PROCESSOR][:]:
            # Check if its the only processor in VPC and there are workloads
            proc_vpc = temp_state_failed.get_fabric_object(ObjectKind.PROCESSOR.value,
                                                           proc, fabric).get('vpc')
            # FIX: was get_fabric_object(kind, fabric) — the singular getter with
            # the wrong arguments; the per-fabric collection getter is plural.
            processors = [x[0] for x in
                          temp_state_ok.get_fabric_objects(ObjectKind.PROCESSOR.value, fabric).items()
                          if proc_vpc == x[1]['vpc']]
            workloads = [x[0] for x in
                         temp_state_ok.get_fabric_objects(ObjectKind.WORKLOAD.value, fabric).items()
                         if proc_vpc == x[1]['vpc']]
            if not bool(processors) and bool(workloads):
                log_error("Cannot delete {0!r}. At least one {1} should left in VPC {2!r} to manage workloads: {3}"
                          .format(proc, ObjectKind.PROCESSOR.value.title(), proc_vpc, workloads))
                sys.exit(1)
        # Actual node delete
        if delete_target[fabric][ObjectKind.ORCHESTRATOR] or delete_target[fabric][ObjectKind.PROCESSOR] or \
                delete_target[fabric][ObjectKind.WORKLOAD]:
            # Get credentials
            credentials = Credentials(fabric, temp_state_failed.get(), ctx.obj.state.config)
            if not credentials.get():
                log_error('Batch failed. Not able to get credentials')
                sys.exit(1)
            # FIX: compute the node list up-front; previously it was assigned
            # only in the plan_generate failure branch, so the plan_execute
            # failure branch logged a stale value from the VPC section.
            delete_nodes = delete_target[fabric][ObjectKind.ORCHESTRATOR] + \
                delete_target[fabric][ObjectKind.PROCESSOR] + delete_target[fabric][ObjectKind.WORKLOAD]
            # Run terraform
            terraform = Terraform(fabric, temp_state_ok, credentials, ctx.obj.version)
            if not terraform.plan_generate():
                log_error('Batch failed to delete nodes: {!r}. Exiting'.format(delete_nodes))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(1)
            terraform_result = terraform.plan_execute()
            if not terraform_result[0]:
                log_error('Batch failed to delete nodes: {!r}. Exiting'.format(delete_nodes))
                ctx.obj.state = deepcopy(temp_state_failed)
                ctx.obj.state.dump()
                sys.exit(terraform_result[1])
        # Success
        ctx.obj.state = deepcopy(temp_state_ok)
        ctx.obj.state.dump()
    log_ok('Batch is finished')
    # Generate SSH configuration (best effort; failure only warns)
    ssh_config = SshConfig(ctx.obj.state)
    if ssh_config.generate_config():
        log_info('Generating SSH config...')
    else:
        log_warn('Error during SSH config generation')
    return True
def config_workload(ctx, workload_name, all_nodes, orchestrator_fqdn, location, dry_run, file):
    """Configure one workload or all workloads in the current fabric.

    Resolves the target list (``--all`` or a single name), writes the
    orchestrator FQDN / location into each workload's config, then runs
    the configure/stop/start actions derived from the state.  Exits the
    process with status 1 on validation failures and with the action's
    code on execution failures.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param workload_name: single workload to configure (mutually
        exclusive with ``all_nodes``)
    :param all_nodes: configure every eligible workload
    :param orchestrator_fqdn: orchestrator endpoint to set; required for
        workloads still in CREATED state
    :param location: optional location to set
    :param dry_run: if truthy, only report what would be configured
    :param file: optional file name, echoed back when given
    :return: True on success (or after a dry run)
    """
    obj_kind = ObjectKind.WORKLOAD.value
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot configure {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Generate temporary state; committed only at the end
    temp_state = deepcopy(ctx.obj.state)
    # Check target objects to be used
    obj_list = []
    if all_nodes:
        if workload_name:
            log_error("Cannot configure. Either {}-NAME or option --all should be used"
                      .format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()]
        # Successfully created workloads require --orchestrator-fqdn
        obj_created = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
                       if temp_state.check_object_state(x[1], ObjectState.CREATED) and
                       temp_state.check_object_status(x[1], ObjectStatus.SUCCESS)]
        obj_created_failed = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
                              if temp_state.check_object_state(x[1], ObjectState.CREATED) and
                              temp_state.check_object_status(x[1], ObjectStatus.FAILED)]
        obj_deleting = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
                        if temp_state.check_object_state(x[1], ObjectState.DELETING)]
        # Eligible = everything except deleting and failed-created objects
        obj_list = [x for x in obj_all
                    if x not in obj_deleting and x not in obj_created_failed]
        if not obj_list:
            states = [ObjectState.CREATED.value, ObjectState.CONFIGURED.value,
                      ObjectState.UPDATED.value, ObjectState.STARTED.value,
                      ObjectState.STOPPED.value]
            log_error("Cannot configure. There are no {}s in states: {!r}".format(obj_kind, states))
            sys.exit(1)
        if obj_created and orchestrator_fqdn is None:
            log_error('Cannot configure {}. Option "--orchestrator-fqdn" is required by {!r}'
                      .format(obj_kind.lower(), obj_created))
            sys.exit(1)
        if obj_created_failed:
            log_warn('There are failed {}s during creation: {!r}. Skipping...'
                     .format(obj_kind.lower(), obj_created_failed))
        if obj_deleting:
            log_warn('There are {}s in deleting state: {!r}. Skipping...'.format(
                obj_kind.lower(), obj_deleting))
    elif workload_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind, workload_name)
        # Check if exist
        if not temp_state.check_workload(temp_state.get_current_fabric(), obj_name):
            log_error("Cannot configure. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        # FIX: use the normalised obj_name (existence was checked against it);
        # previously the raw workload_name was used here and in obj_list.
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED):
            if orchestrator_fqdn is None:
                log_error('Cannot configure. Missing option "--orchestrator-fqdn"')
                sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.FAILED):
            log_error("{0} was created with failures. Run create {1} again before configure"
                      .format(obj_kind.title(), obj_kind))
            sys.exit(1)
        obj_list = [obj_name]
    elif workload_name is None:
        log_error("Cannot configure. Either {}-NAME or option --all should be used".
                  format(obj_kind.upper()))
        sys.exit(1)
    # Set configuration on each target object
    for obj_name in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if orchestrator_fqdn is not None:
            obj['config']['orchestrator'] = orchestrator_fqdn
        if location is not None:
            obj['config']['location'] = location
    if orchestrator_fqdn is None:
        log_warn('{0}s {1!r} to be re-configured'.format(obj_kind.title(), obj_list))
    # Configure dry-run
    if dry_run:
        log_warn('{0}s {1!r} to be configured (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    actions_list = temp_state.configure_actions_list(obj_kind, obj_list)
    if actions_list['config']:
        res = temp_state.workload_configure(temp_state.get_current_fabric(),
                                            actions_list['config'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    # NOTE(review): stop/start act on ctx.obj.state while configure acts on
    # temp_state — confirm this asymmetry is intentional.
    if actions_list['stop']:
        res = ctx.obj.state.workload_stop(temp_state.get_current_fabric(),
                                          actions_list['stop'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    if actions_list['start']:
        res = ctx.obj.state.workload_start(temp_state.get_current_fabric(),
                                           actions_list['start'])
        if not res[0]:
            ctx.obj.state = deepcopy(temp_state)
            ctx.obj.state.dump()
            sys.exit(res[1])
    # Commit the new state
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if file:
        click.echo('File: {0}'.format(file))
    return True
def config_orchestrator(ctx, orchestrator_name, all_nodes, dry_run, file):
    """Configure one orchestrator or all orchestrators in the current fabric.

    Resolves the target list (``--all`` or a single name), collects
    one-time passwords for freshly created controller/telemetry nodes and
    dockerhub credentials for controllers, then runs the configuration
    and commits the new state.  Exits the process with status 1 on
    validation failures and with the action's code on execution failures.

    :param ctx: click context; ``ctx.obj.state`` holds the fabric state
    :param orchestrator_name: single orchestrator to configure (mutually
        exclusive with ``all_nodes``)
    :param all_nodes: configure every eligible orchestrator
    :param dry_run: if truthy, only report what would be configured
    :param file: optional file name, echoed back when given
    :return: True on success (or after a dry run)
    """
    obj_kind = ObjectKind.ORCHESTRATOR.value
    # Check if fabric is set
    if not ctx.obj.state.get_current_fabric():
        log_error('Cannot configure {0}. Please select fabric first.'.format(obj_kind))
        sys.exit(1)
    # Generate temporary state; committed only at the end
    temp_state = deepcopy(ctx.obj.state)
    # Check target objects to be used
    obj_list = []
    if all_nodes:
        if orchestrator_name:
            log_error("Cannot configure. Either {}-NAME or option --all should be used"
                      .format(obj_kind.upper()))
            sys.exit(1)
        obj_all = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()]
        obj_created_failed = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
                              if temp_state.check_object_state(x[1], ObjectState.CREATED) and
                              temp_state.check_object_status(x[1], ObjectStatus.FAILED)]
        obj_deleting = [x[0] for x in temp_state.get_fabric_objects(obj_kind).items()
                        if temp_state.check_object_state(x[1], ObjectState.DELETING)]
        # Eligible = everything except deleting and failed-created objects
        obj_list = [x for x in obj_all
                    if x not in obj_deleting and x not in obj_created_failed]
        if not obj_list:
            states = [ObjectState.CREATED.value, ObjectState.CONFIGURED.value,
                      ObjectState.UPDATED.value]
            log_error("Cannot configure. There are no {}s in states: {!r}".format(obj_kind, states))
            sys.exit(1)
        if obj_created_failed:
            log_warn('There are failed {}s during creation: {!r}. Skipping...'
                     .format(obj_kind.lower(), obj_created_failed))
        if obj_deleting:
            log_warn('There are {}s in deleting state: {!r}. Skipping...'.format(
                obj_kind.lower(), obj_deleting))
    elif orchestrator_name:
        # Check if naming matches rules
        obj_name = temp_state.normalise_state_obj_name(obj_kind, orchestrator_name)
        # Check if exist
        if not temp_state.check_orchestrator(temp_state.get_current_fabric(), obj_name):
            log_error("Cannot configure. {0} {1!r} doesn't exist".format(
                obj_kind.title(), obj_name))
            sys.exit(1)
        # Check state
        # FIX: use the normalised obj_name (existence was checked against it);
        # previously the raw orchestrator_name was used here and in obj_list.
        obj = temp_state.get_fabric_object(obj_kind, obj_name)
        if temp_state.check_object_state(obj, ObjectState.DELETING):
            log_error("Cannot proceed, object is set for deletion!")
            sys.exit(1)
        if temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.FAILED):
            log_error("{0} was created with failures. Run create {1} again before configure"
                      .format(obj_kind.title(), obj_kind))
            sys.exit(1)
        obj_list = [obj_name]
    elif orchestrator_name is None:
        log_error("Cannot configure. Either {}-NAME or option --all should be used".
                  format(obj_kind.upper()))
        sys.exit(1)
    # Configure dry-run
    if dry_run:
        log_warn('{0}s {1!r} to be configured (used with --dry-run)'.format(
            obj_kind.title(), obj_list))
        return True
    # Check if controller and telemetry are not configured yet: freshly and
    # successfully created ones get a generated password.
    passwd_controller = None
    passwd_grafana = None
    for orch in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, orch)
        if obj['type'] == 'controller' and temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.SUCCESS):
            passwd_controller = temp_state.get_passwd()
        if obj['type'] == 'telemetry' and temp_state.check_object_state(obj, ObjectState.CREATED) and \
                temp_state.check_object_status(obj, ObjectStatus.SUCCESS):
            passwd_grafana = temp_state.get_passwd()
    # Check dockerhub credentials (needed only for controllers)
    credentials = None
    for orch in obj_list:
        obj = temp_state.get_fabric_object(obj_kind, orch)
        if obj['type'] == 'controller':
            credentials = Credentials(temp_state.get_current_fabric(), temp_state.get(),
                                      ctx.obj.state.config)
            if not credentials.get_docker():
                log_error('Cannot configure. Not able to get Bayware dockerhub credentials')
                sys.exit(1)
    # Configure orchestrator
    res = temp_state.orchestrator_configure(
        temp_state.get_current_fabric(), obj_list,
        controller_passwd=passwd_controller,
        grafana_passwd=passwd_grafana,
        credentials=credentials)
    if not res[0]:
        sys.exit(res[1])
    # Show grafana password
    if passwd_grafana is not None:
        temp_state.show_passwd(passwd_grafana, 'grafana')
    # Show controller password
    if passwd_controller is not None:
        temp_state.show_passwd(passwd_controller, 'controller')
    # Dump state
    ctx.obj.state = deepcopy(temp_state)
    ctx.obj.state.dump()
    if file:
        click.echo('File: {0}'.format(file))
    return True