def preflight_check(self):
    """Validate planet and stack preconditions before writing service state.

    Accumulates validation errors into the task's preflight_check_result
    output and returns that result object.
    """
    issues = []

    def _fail(error):
        # mark the overall check failed and keep the specific error
        self.preflight_check_result.status = 'FAIL'
        issues.append(error)

    # instantiate planet
    try:
        self.planet = Planet(self.args.get('planet_name'))
    except Exception as e:
        _fail(skybase.exceptions.SkyBaseValidationError(
            'planet init: {0}'.format(simple_error_format(e))))

    # validate stacks for errors before writing to service state registry
    try:
        if not skybase.actions.skycloud.are_stacks_valid(
                self.planet.orchestration_engine, self.stacks):
            _fail(skybase.exceptions.SkyBaseValidationError(
                'cannot write service state record with invalid stacks'))
    except Exception as e:
        _fail(skybase.exceptions.SkyBaseValidationError(
            'test for valid stacks: {0}'.format(simple_error_format(e))))

    self.preflight_check_result.set_output(issues)
    return self.preflight_check_result
def preflight_check(self):
    """Verify the planet and chef node named in the task args exist.

    Returns the task's preflight_check_result with any validation errors
    recorded in its output.
    """
    issues = []

    def _fail(error):
        # record the failure and mark the overall check as failed
        self.preflight_check_result.status = 'FAIL'
        issues.append(error)

    # instantiate planet
    try:
        self.planet = Planet(self.args.get('planet'))
    except Exception as e:
        _fail(skybase.exceptions.SkyBaseValidationError(
            'planet init: {0}'.format(simple_error_format(e))))

    # look up the chef node on that planet
    try:
        self.node = sky_chef_actions.get_node(
            self.planet, self.args.get('node'), self.logger)
    except Exception as e:
        _fail(skybase.exceptions.SkyBaseValidationError(
            'could not find node "{0}": {1}'.format(
                self.args.get('node'), simple_error_format(e))))

    self.preflight_check_result.set_output(issues)
    return self.preflight_check_result
def preflight_check(self):
    """Confirm the destination planet can be instantiated."""
    issues = []
    try:
        self.planet = Planet(self.args.get('destination_planet'))
    except Exception as e:
        # planet lookup failed: flag the overall check, keep the detail
        self.preflight_check_result.status = 'FAIL'
        issues.append(skybase.exceptions.SkyBaseValidationError(
            'planet init: {0}'.format(simple_error_format(e))))
    self.preflight_check_result.set_output(issues)
    return self.preflight_check_result
def preflight_check(self):
    """Validate the target planet before proceeding.

    Collected validation errors are pushed into the task's
    preflight_check_result output, matching the contract of the other
    preflight_check implementations in this file.
    """
    # container for preflight check issues
    preflight_result = []
    # instantiate planet
    try:
        self.planet = Planet(self.args.get('planet_name'))
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(skybase.exceptions.SkyBaseValidationError(
            'planet init: {0}'.format(simple_error_format(e))))
    # TODO: test if state record id exists
    # BUG FIX: previously the accumulated preflight_result was never
    # attached to the result object, so planet-init errors were silently
    # dropped; every sibling preflight_check calls set_output().
    self.preflight_check_result.set_output(preflight_result)
    return self.preflight_check_result
def get_stack_status(self):
    """Return cloud provider info for this service's first stack, or None.

    Best-effort: any failure while resolving the stack id or calling the
    cloud API yields None rather than raising.
    """
    # split service stack id into positional components
    # (id is path-like: /<planet>/... -- TODO confirm against writers)
    planet_name = self.service_id.split('/')[1]
    # acquire provider stack status
    stack_status = None
    planet = Planet(planet_name)
    # TODO/DECISION: fail silently and let context determine action or raise errors?
    # attempt to acquire stack status
    try:
        # FIX: dict.values()[0] only works on Python 2 list-returning
        # values(); next(iter(...)) yields the same first value on both
        # Python 2 and 3.
        stack_record = next(iter(self.stack_info[0].values()))
        stack_id = stack_record['cloud']['stack_id']
        # NOTE(review): action is 'get_instance_info' although the method
        # is named get_stack_status; siblings use 'get_stack_status' for
        # status -- confirm which is intended before changing.
        stack_status = call_cloud_api(
            planet=planet,
            stack_name=stack_id,
            action='get_instance_info',
        )
    except Exception:
        # deliberate best-effort per the TODO above: caller treats None
        # as "status unavailable"
        pass
    return stack_status
def execute(self):
    """Produce a tab-separated report of instance IPs per role and stack.

    Queries the planet state registry for the service's stacks, asks the
    cloud provider for each stack's status, and lists private IPs only
    for stacks whose status is CREATE_COMPLETE.
    """
    # initialize service status query using skybase service registry id
    query = PlanetStateDbQuery.init_from_id(
        self.args['skybase_id'],
        query_type=PlanetStateQueryTypes.WILDCARD)

    # execute query and return standard result
    query_result = query.format_result_set(query.execute())

    # tabular output header
    service_output = '\n{0}\t\t{1}\t\t{2}\n\n'.format(
        'ip_address', 'role_name', 'stack_name')

    # gather and format state information for each stack, role, and
    # instance ip address for query result
    for result in query_result:
        # unpack query result; each result is a single-entry mapping of
        # record id -> instance info.
        # FIX: items()[0] is Python 2 only; next(iter(...)) works on both.
        recid, instance_info = next(iter(result.items()))
        stackname = instance_info['cloud']['stack_name']

        # TODO: performance / DRY enhancement: register planet and only init when not present
        planet_name = recid.split('/')[1]
        planet = Planet(planet_name)

        # get stack status
        stack_status = call_cloud_api(
            planet=planet,
            stack_name=stackname,
            action='get_stack_status',
        )

        # report state information if stack launch complete
        stack_output = ''
        if stack_status == 'CREATE_COMPLETE':
            # call cloud provider for ip addresses
            stack_info = call_cloud_api(
                planet=planet,
                stack_name=stackname,
                action='get_instance_ip',
            )
            # parse stack, role, instance info for ip addresses and
            # present in tabular format
            for instance_role_name, instances in stack_info.items():
                # prepare output line for each ip address
                for inst in instances:
                    # prepare complete line of output
                    role_ip_info = '{0}\t\t{1}\t\t{2}\n\n'.format(
                        str(inst['private_ip_address']),
                        instance_role_name, stackname)
                    # accumulate stack output
                    stack_output += role_ip_info
        else:
            # accumulate stack output
            stack_output += '\n\nWARNING: Stack "{0}" Status is "{1}" - no IP info \n'.format(
                stackname, stack_status)

        # accumulate service output
        service_output = service_output + stack_output

    # prepare results
    self.result.output = service_output.strip()
    self.result.format = skytask.output_format_raw
    return self.result
def preflight_check(self):
    """Validate all preconditions for updating a deployed service.

    Checks, in dependency order: source planet init, artiball transfer,
    source SkyService init, CLI stack list vs source stacks, target
    service record read + parse, target stacks deployed, target planet
    init and planet match, version ordering, salt API login, and salt
    minion reachability per stack/role.  Each failure marks the result
    FAIL and appends a SkyBaseValidationError; later dependent checks are
    skipped via try/else chaining.
    """
    preflight_result = []
    # TODO: move reusable preflight functions/tests to skybase.skytask.service

    # instantiate planet from --planet option
    try:
        self.planet = Planet(self.planet_name)
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError('source planet init: {0}'.format(
                simple_error_format(e))))

    try:
        # attempt transfer artiball to worker (only when not already cached
        # in the local artiball data dir)
        if not os.path.exists(
                os.path.join(self.runner_cfg.data['artiball_data_dir'],
                             self.args.get('source_artiball'))):
            artiball_transfer(
                artiball_key=self.args.get('source_artiball'),
                bucket_name=self.runner_cfg.data['buckets']['cache']['name'],
                profile=self.runner_cfg.data['buckets']['cache']['profile'],
                release_dir=self.runner_cfg.data['artiball_data_dir'])
    except boto.exception.S3ResponseError as e:
        # S3-specific failure: include exception class name for context
        # (e.message is a Python 2 / boto idiom)
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError('artiball transfer: {0}: {1}'.format(
                type(e).__name__, str(e.message))))
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError('artiball transfer: {0}'.format(
                simple_error_format(e))))
    else:
        # initialize SkyService from source artiball
        # NOTE(review): uses self.source_artiball here but
        # args.get('source_artiball') above -- confirm both are the same value
        try:
            self.source_service = SkyService().init_from_artiball(
                artiball_name=self.source_artiball,
                artiball_data_dir=self.runner_cfg.data['artiball_data_dir'])
            self.chef_type = self.source_service.deploy.definition.get(
                'chef_type', 'server')
        except Exception as e:
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                SkyBaseValidationError('sky service init: {0}'.format(
                    simple_error_format(e))))
        else:
            # test CLI stack option values against SkyService stacks
            try:
                bad_stacks = []
                for stack in self.args['stack_list']:
                    if stack not in self.source_service.deploy.stack_ids:
                        bad_stacks.append(stack)
                if bad_stacks:
                    self.preflight_check_result.status = 'FAIL'
                    preflight_result.append(
                        SkyBaseValidationError(
                            'source service {0}: unknown stacks {1}'.format(
                                self.source_service.name, bad_stacks)))
                else:
                    # given all good stacks, prepare stack launch list target
                    # (empty CLI list means "all stacks")
                    self.source_service.deploy.stack_launch_list = self.args[
                        'stack_list'] if self.args[
                            'stack_list'] else self.source_service.deploy.stack_ids
            except Exception as e:
                self.preflight_check_result.status = 'FAIL'
                preflight_result.append(
                    SkyBaseValidationError(
                        'source stack verification: {0}'.format(
                            simple_error_format(e))))

    # attempt to read state db record
    try:
        serialized_record = skybase.actions.state.read(
            mode=self.mode,
            record_id=self.id,
            credentials=sky_cfg.SkyConfig.init_from_file('credentials').data,
            format='yaml')
        # DECISION: need general method for processing API in-flight errors
        # (the API signals not-found as a string prefix, not an exception)
        if serialized_record.startswith('StateDBRecordNotFoundError'):
            raise StateDBRecordNotFoundError(self.id)
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError(simple_error_format(e)))
    else:
        try:
            # NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary objects; here it deliberately rebuilds
            # the registry's service object -- ensure the record source
            # is trusted.
            self.target_service = yaml.load(serialized_record)
            self.runtime.tag = self.target_service.tag
        except Exception as e:
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                SkyBaseValidationError('service registry init: {0}'.format(
                    simple_error_format(e))))
        else:
            # every stack selected for launch must already be deployed
            # on the target service
            try:
                bad_stacks = []
                for stack in self.source_service.deploy.stack_launch_list:
                    if stack not in self.target_service.stacks.deployed_stacks:
                        bad_stacks.append(stack)
                if bad_stacks:
                    self.preflight_check_result.status = 'FAIL'
                    preflight_result.append(
                        SkyBaseValidationError(
                            'target service {0}: stacks not deployed {1}'.format(
                                self.target_service.service, bad_stacks)))
            except Exception as e:
                self.preflight_check_result.status = 'FAIL'
                preflight_result.append(
                    SkyBaseValidationError(
                        'target stack verification: {0}'.format(
                            simple_error_format(e))))

    # instantiate planet based on service registry value
    try:
        self.target_planet = Planet(self.target_service.planet)
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError('target planet init: {0}'.format(
                simple_error_format(e))))
    else:
        # source and target must refer to the same planet
        if self.planet.planet_name != self.target_planet.planet_name:
            self.preflight_check_result.status = 'FAIL'
            # NOTE(review): appends a bare string here, not a
            # SkyBaseValidationError like the other failures -- confirm
            # consumers accept both
            preflight_result.append(
                'source planet {0} not equal target planet {1}'.format(
                    self.planet.planet_name, self.target_planet.planet_name))

    # test if existing service can be updated
    if self.source_service and self.target_service:
        # TODO: test that target and source service names match!
        # update allowed only when source version >= deployed version
        is_version_ok = (
            self.source_service.manifest.get('app_version') >=
            self.target_service.metadata.version)
        if not is_version_ok:
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                SkyBaseValidationError(
                    'source < target service version: {0} < {1}'.format(
                        self.source_service.manifest.get('app_version'),
                        self.target_service.metadata.version)))
    else:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError(
                'cannot update service: missing (target / source): ({0} / {1})'
                .format(self.target_service == None,
                        self.source_service == None)))

    try:
        # acquire salt API authtoken
        self.authtoken = skybase.actions.salt.get_saltapi_authtoken(
            runner_cfg=self.runner_cfg,
            planet_name=self.target_service.planet)
        if self.authtoken is None:
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                SkyBaseValidationError('failed to login to salt API'))
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            SkyBaseValidationError(
                'failed to acquire salt API authtoken: {0}'.format(
                    simple_error_format(e))))
    else:
        # ping every stack/role's minions via the skybase-id salt grain
        for stack in self.source_service.deploy.stack_launch_list:
            stack_roles = self.target_service.blueprint.get_stack_roles(
                stack)
            for role in stack_roles:
                stack_role_grain = self.target_service.stacks.stacks[
                    stack].get_stack_role_salt_grain_skybase_id(role)
                try:
                    saltapi_result = skybase.actions.salt.test_ping_by_grain(
                        grain=stack_role_grain,
                        authtoken=self.authtoken,
                        planet_name=self.target_service.planet,
                    )
                except Exception as e:
                    self.preflight_check_result.status = 'FAIL'
                    preflight_result.append(
                        SkyBaseValidationError(
                            'saltapi test.ping: {0}'.format(
                                simple_error_format(e))))
                else:
                    # verify that some minions were targeted
                    if saltapi_result[0] != SkySaltAPI.NO_MINIONS:
                        # verify all minions reply returned True to ping
                        if not all(saltapi_result[0].values()):
                            self.preflight_check_result.status = 'FAIL'
                            preflight_result.append(
                                SkyBaseValidationError(
                                    'unreachable salt minions for stack-role {0}-{1} using grain {2}: {3}'
                                    .format(stack, role, stack_role_grain,
                                            saltapi_result)))
                    else:
                        self.preflight_check_result.status = 'FAIL'
                        preflight_result.append(
                            SkyBaseValidationError(
                                '{0} using grain: {1}'.format(
                                    SkySaltAPI.NO_MINIONS,
                                    stack_role_grain)))

    self.preflight_check_result.set_output(preflight_result)
    return self.preflight_check_result
def execute(self):
    """Query the planet state registry and return service status records.

    Query selection: no filter args -> planet drill-down listing;
    skybase_id -> exact id lookup; otherwise wildcard query over the
    provided planet/service/tag/stack filters.  For stack-level results,
    each record is annotated with its skybase id and provider stack
    status, plus per-role instance info when verbose.
    """
    # unpack args
    skybase_id = self.args.get('skybase_id')
    planet_name = self.args.get('planet_name')
    service_name = self.args.get('service_name')
    continent_tag = self.args.get('tag')
    stack_name = self.args.get('stack_name')
    verbose = self.args.get('verbose')

    # test if any query filtering args are provided to drive type of query
    # (chained equality: True only when every arg is None)
    no_query_args = skybase_id == planet_name == service_name == continent_tag == stack_name == None

    # initialize service status query
    if no_query_args:
        # no query filter args provided; limit output to planet list
        query = PlanetStateDbQuery(
            planet=planet_name,
            service=service_name,
            tag=continent_tag,
            stack=stack_name,
            query_type=PlanetStateQueryTypes.DRILLDOWN,
        )
    elif skybase_id:
        # query by unique skybase id
        query = PlanetStateDbQuery.init_from_id(skybase_id,)
    else:
        # query by provided arguments
        query = PlanetStateDbQuery(
            planet=planet_name,
            service=service_name,
            tag=continent_tag,
            stack=stack_name,
            query_type=PlanetStateQueryTypes.WILDCARD,
        )

    # execute query and return standard result
    query_result = query.format_result_set(query.execute())

    # extend query results if working with skybase ID or filter args that return stacks
    if not no_query_args:
        for result in query_result:
            # unpack query result (single-entry mapping of record id ->
            # info; items()[0] is a Python 2 idiom)
            recid, info = result.items()[0]
            stackname = info['cloud']['stack_name']

            # TODO: performance / DRY enhancement: register planet and only init when not present
            planet_name = recid.split('/')[1]
            planet = Planet(planet_name)

            # get stack status; on failure report the error message as the
            # status (e.message is a Python 2 idiom)
            try:
                stack_status = call_cloud_api(
                    planet=planet,
                    stack_name=stackname,
                    action='get_stack_status',
                )
            except Exception as e:
                stack_status = e.message

            # add skybase ID
            info['skybase_id'] = recid

            # add status to results
            info['cloud'].update({'stack_status': stack_status})

            # query cloud provider for current state information
            if verbose and stack_status == 'CREATE_COMPLETE':
                instance_info = call_cloud_api(
                    planet=planet,
                    stack_name=stackname,
                    action='get_instance_info',
                )
                # insert roles info into query result object
                info['roles'] = instance_info

    # execute query
    self.result.output = query_result
    self.result.format = skytask.output_format_json
    return self.result
def preflight_check(self):
    """Validate preconditions for deploying a service from an artiball.

    Checks destination planet init, artiball transfer/cache, SkyService
    init (including trusted chef cookbook source in chef-server mode),
    CLI stack list vs service stacks, and runtime key=value options.
    Failures mark the result FAIL and accumulate into its output.
    """
    # initialize results container
    preflight_result = []

    # instantiate planet
    try:
        self.planet = Planet(self.args.get('destination_planet'))
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'planet init: {0}'.format(simple_error_format(e))))

    # TODO: should artiball transfer *always* occur? OR attempt comparison timestamp / fingerprint to limit?
    # attempt transfer artiball to worker (skip when already cached locally)
    try:
        if not os.path.exists(
                os.path.join(self.runner_cfg.data['artiball_data_dir'],
                             self.args.get('source_artiball'))):
            skybase.actions.skyenv.artiball_transfer(
                artiball_key=self.args.get('source_artiball'),
                bucket_name=self.runner_cfg.data['buckets']['cache']['name'],
                profile=self.runner_cfg.data['buckets']['cache']['profile'],
                release_dir=self.runner_cfg.data['artiball_data_dir'])
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'artiball transfer: {0}'.format(simple_error_format(e))))

    # initialize SkyService using --artiball value
    try:
        self.service = SkyService().init_from_artiball(
            artiball_name=self.args.get('source_artiball'),
            artiball_data_dir=self.runner_cfg.data['artiball_data_dir'])

        # check planet trusted chef cookbook source against artiball's metadata when running chef-server mode
        if self.service.deploy.definition.get('chef_type',
                                              'server') == 'server':
            cookbook_source = self.service.manifest['chef_cookbook_source']
            # NOTE(review): reaches into Planet._yaml_data (private) --
            # consider a public accessor
            trusted_cookbook_source = self.planet._yaml_data['services'][
                'chefserver']['trusted_chef_cookbook_source']
            if cookbook_source not in trusted_cookbook_source:
                self.preflight_check_result.status = 'FAIL'
                preflight_result.append(
                    'untrusted chef cookbook source in artiball: ' +
                    cookbook_source)
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'sky service init: {0}'.format(simple_error_format(e))))

    # test CLI stack option values against SkyService stacks
    try:
        bad_stacks = []
        for stack in self.args['stack_list']:
            if stack not in self.service.deploy.stack_ids:
                bad_stacks.append(stack)
        if bad_stacks:
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                skybase.exceptions.SkyBaseValidationError(
                    'service {0}: unknown stacks {1}'.format(
                        self.service.name, bad_stacks)))
        else:
            # given all good stacks, prepare stack launch list target
            # (empty CLI list means "all stacks")
            self.service.deploy.stack_launch_list = self.args[
                'stack_list'] if self.args[
                    'stack_list'] else self.service.deploy.stack_ids
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'stack verification: {0}'.format(simple_error_format(e))))

    # prepare runtime
    try:
        # push client runtime options into runtime object
        # (each option is a 'key=value' string)
        runtime_unpacked = dict(
            attr.split('=') for attr in self.args.get('runtime'))
        self.runtime.set_attrs(**runtime_unpacked)
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'runtime: {0}'.format(simple_error_format(e))))

    self.preflight_check_result.set_output(preflight_result)
    return self.preflight_check_result
def preflight_check(self):
    """Validate preconditions for deleting one or all service stacks.

    Verifies: planet init; exactly one of --stack_name /
    --delete_all_stacks; that the options resolve to a unique registry
    entry; that no targeted stack is already in a DELETE* state.  Also
    populates self.stack_deletion_list / self.stack_deletion_info and,
    for chef-server deployments, self.stack_chef_nodes for later node
    cleanup.
    """
    # container for preflight check issues
    preflight_result = []

    # instantiate planet
    try:
        self.planet = Planet(self.args.get('planet_name'))
    except Exception as e:
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            skybase.exceptions.SkyBaseValidationError(
                'planet init: {0}'.format(simple_error_format(e))))

    # validate required options to delete one or all stacks
    # (exactly one of stack_name / delete_all_stacks must be given)
    if not (self.args.get('stack_name') or self.args.get('delete_all_stacks'))\
            or (self.args.get('stack_name') and self.args.get('delete_all_stacks')):
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append(
            ('specify one stack or option to delete all stacks for {0}'.
             format(self.args.get('service_name'))))

    # validate existence of requested service/stack in state registry
    query = PlanetStateDbQuery(
        planet=self.args.get('planet_name'),
        service=self.args.get('service_name'),
        tag=self.runtime.tag,
        stack=self.args.get('stack_name'),
        query_type='exact',
    )

    # verify unique pointer to service or service/stack
    if not query.can_find_exact():
        self.preflight_check_result.status = 'FAIL'
        preflight_result.append((skybase.exceptions.SkyBaseDeployError(
            'options do not identify a unique service or stack: {0}'.format(
                query.show_query_path()))))

    # TODO: push stack path into task result
    # save validated query path for postprocessing
    self.stack_path = query.query

    # reconfigure query to read resources for one or many service stacks
    query.query_type = query.WILDCARD
    query.query = query.make_query()

    # collect all stacks for deletion
    result_set = query.execute()

    # collect status of launched stacks
    for record in result_set:
        # accumulate list of provider stack ids for deletion
        self.stack_deletion_list.append(record.cloud.stack_id)
        try:
            # verify cloud provider DELETE* status for stack id
            stack_status = skybase.actions.skycloud.call_cloud_api(
                planet=self.planet,
                stack_name=record.cloud.stack_id,
                action='get_stack_status')
        except Exception as e:
            # status lookup failure aborts the whole preflight
            raise skybase.exceptions.SkyBaseDeployError(
                skybase.utils.simple_error_format(e))
        if stack_status.startswith('DELETE'):
            # already being (or been) deleted: refuse to delete again
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(
                skybase.exceptions.SkyBaseDeployError(
                    'cannot delete stack {0} with status {1}'.format(
                        record.cloud.stack_id, stack_status)))
        # accumulate stack information for deleting state db records and logging result
        self.stack_deletion_info[record.cloud.id] = {
            'stack_id': record.cloud.stack_id,
            'stack_name': record.cloud.stack_name,
        }

    # determine if deployed service used chef server. if so, then prepare to delete chef nodes
    # TODO: find authoritative location/source for skybase id definition
    # skybase state DB id
    skybase_id = '/{0}/{1}/{2}'.format(
        self.args.get('planet_name'),
        self.args.get('service_name'),
        self.runtime.tag,
    )

    # init service registry record and examine blueprint chef type
    service_record = ServiceRegistryRecord.init_from_id(skybase_id)
    self.chef_type = service_record.blueprint.definition.get('chef_type')
    self.is_chef_type_server = (self.chef_type
                                and self.chef_type == 'server')

    # prepopulate list of host/instance names for use in chef node delete when chef_type server
    # NOTE:
    self.stack_chef_nodes = dict()
    if self.is_chef_type_server:
        for skybase_stack_id, stack_info in self.stack_deletion_info.items():
            self.stack_chef_nodes[stack_info[
                'stack_name']] = skybase.actions.skychef.get_stack_chef_nodes(
                    skybase_stack_id=skybase_stack_id,
                    runner_cfg=self.runner_cfg,
                )

    self.preflight_check_result.set_output(preflight_result)
    return self.preflight_check_result
def delete_stacks(planet_name, service_name, tag, stacks, apply):
    """Archive state registry records for deleted provider stacks.

    :param planet_name: planet whose state registry is affected
    :param service_name: service owning the stacks
    :param tag: service tag (third path component of the registry record)
    :param stacks: mapping of registry record id -> dict with
        'stack_id' and 'stack_name'
    :param apply: falsy for a dry run (returns the requested operation
        without touching any state); truthy to perform the archive
    :returns: dry-run echo dict, or a dict with 'archive' (archived
        record ids) and 'stack_status' (provider status per stack id)
    """
    result = dict()
    result['archive'] = []
    result['stack_status'] = dict()

    if not apply:
        # dry run: echo the requested operation without side effects
        result = {
            'delete_stacks': {
                'planet_name': planet_name,
                'stacks': stacks,
                'apply': apply,
            }
        }
    else:
        # deferred import: only the apply path needs the registry log
        from skybase.service.state import ServiceRegistryLog

        # acquire planet state db and connection to cloud provider
        db = PlanetStateDb()

        # define record format as list of directory names based on deployment
        planetdb_basepath = os.path.join(
            db.db,
            planet_name,
            service_name,
            tag,
        )

        # append deletion entries to the service registry log
        log_path = os.path.join(planetdb_basepath, ServiceRegistryLog.FILENAME)
        with open(log_path, 'a') as service_log:
            for recid, stack_info in stacks.items():
                # unpack stack_info
                stack_id = stack_info.get('stack_id')
                stack_launch_name = stack_info.get('stack_name')

                # verify cloud provider DELETE* status for stack id
                stack_status = skybase.actions.skycloud.call_cloud_api(
                    planet=Planet(planet_name),
                    stack_name=stack_id,
                    action='get_stack_status')
                result['stack_status'][stack_id] = stack_status

                # only archive records for stacks the provider reports deleted
                if not stack_status.startswith('DELETE'):
                    continue

                # identify record source and archive destination
                src = os.path.join(db.db, recid.strip('/'))
                dst = os.path.join(db.archive, recid.strip('/'))

                # identify resources file
                srcfile = os.path.join(src, db.resources)
                dstfile = os.path.join(dst, db.resources)

                # make archive folder path and move the resources file
                skybase.utils.mkdir_path(dst)
                shutil.move(srcfile, dstfile)
                result['archive'].append(recid)

                # clean-up non-empty planet registry directory tree from bottom to top
                while recid:
                    # join database and key
                    target_dir = os.path.join(db.db, recid.strip('/'))
                    # attempt to remove bottom stack path directory;
                    # discontinue if not empty of file or other directory
                    try:
                        os.rmdir(target_dir)
                    except OSError as e:
                        # BUG FIX: original tested `if errno.ENOTEMPTY:`,
                        # which is a constant and always true -- so any
                        # OSError (permissions, missing dir, ...) was
                        # swallowed by break instead of re-raised.
                        if e.errno == errno.ENOTEMPTY:
                            break
                        else:
                            raise
                    # remove last element of stack path
                    tempdir = recid.split(os.path.sep)
                    tempdir.pop()
                    recid = os.path.sep.join(tempdir)

                # DECISION: need globally/app available execution mode setting to be able to read from state db api. how?
                # TODO: need to acquire current service metadata from state db service.
                # write service log entry for stack deployment
                service_log.write(
                    '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n'.format(
                        basic_timestamp(), 'DELETE', service_name,
                        'TODO_METADATA_VERSION', 'TODO_METADATA_BUILD_ID',
                        tag, stack_launch_name,
                        'TODO_METADATA_SOURCE_ARTIBALL'))
    return result