def __init__(self, version=__version__, timestamp=None, **kwargs):
    """Initialize the record with a version, a creation timestamp, and
    arbitrary extra attributes applied via ``set_attrs``.

    :param version: version string; defaults to the module ``__version__``.
    :param timestamp: timestamp string; when omitted, a fresh
        ``basic_timestamp()`` is generated per instance.
    :param kwargs: additional attributes forwarded to ``self.set_attrs``.
    """
    self.version = version
    # BUGFIX: the old signature used ``timestamp=basic_timestamp()``, which
    # is evaluated once at import time — every instance created without an
    # explicit timestamp shared the same stale stamp. A None sentinel makes
    # the default per-call while remaining backward-compatible.
    self.timestamp = timestamp if timestamp is not None else basic_timestamp()
    self.set_attrs(**kwargs)
def ping(**kwargs):
    """Sleep for an optional interval and return a diagnostic response.

    :param kwargs: recognized keys are ``planet_name`` (echoed back) and
        ``sleep_interval`` (seconds to sleep; defaults to 0).
    :returns: dict with a ``data`` payload (planet, hostname, interval,
        elapsed work time, timestamp) and a success ``status``.
    """
    start_time = time.time()
    # BUGFIX: parse the interval once with a default of 0. The original code
    # defaulted only the time.sleep() call and then did
    # float(kwargs.get('sleep_interval')) in the response, which raised
    # TypeError whenever 'sleep_interval' was absent.
    sleep_interval = float(kwargs.get('sleep_interval', 0))
    time.sleep(sleep_interval)
    response = {
        'data': {
            'planet': kwargs.get('planet_name'),
            'hostname': socket.gethostname(),
            'sleep_interval': sleep_interval,
            'work_in_secs': time.time() - start_time,
            'timestamp': basic_timestamp(),
        },
        'status': sky_cfg.API_STATUS_SUCCESS,
    }
    return response
def update(self):
    """Re-run the metadata and blueprint updates and append an audit entry.

    :returns: True on success; on any failure, the result of
        ``simple_error_format`` for the raised exception.
    """
    try:
        self.metadata.update()
        self.blueprint.update()
        # TODO: COOKBOOK_ONLY first and only update type supported. will
        # eventually derive from --plan option and artiball config
        fields = (
            basic_timestamp(),
            'UPDATE_RERUN',
            self.service,
            self.metadata.version,
            self.metadata.build,
            self.tag,
            self.metadata.artiball,
        )
        self.log.update('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\n'.format(*fields))
        return True
    except Exception as err:
        return simple_error_format(err)
def delete_stacks(planet_name, service_name, tag, stacks, apply):
    """Archive planet-state records for stacks the cloud provider reports
    as deleting/deleted, pruning now-empty registry directories.

    :param planet_name: target planet identifier.
    :param service_name: service whose stacks are being deleted.
    :param tag: deployment tag.
    :param stacks: mapping of record id -> stack info dict (expects
        ``stack_id`` and ``stack_name`` keys).
    :param apply: when falsy, perform a dry run and just echo the request.
    :returns: dry-run echo dict, or a dict with ``archive`` (archived record
        ids) and ``stack_status`` (provider status per stack id).
    """
    from skybase.service.state import ServiceRegistryLog

    result = dict()
    result['archive'] = []
    result['stack_status'] = dict()

    if not apply:
        # Dry run: report what would be deleted without touching state.
        result = {
            'delete_stacks': {
                'planet_name': planet_name,
                'stacks': stacks,
                'apply': apply,
            }
        }
    else:
        # acquire planet state db and connection to cloud provider
        db = PlanetStateDb()

        # define record format as list of directory names based on deployment
        planetdb_basepath = os.path.join(
            db.db,
            planet_name,
            service_name,
            tag,
        )

        # append deletion entries to the service registry log
        log_path = os.path.join(planetdb_basepath, ServiceRegistryLog.FILENAME)
        with open(log_path, 'a') as service_log:
            for recid, stack_info in stacks.items():
                # unpack stack_info
                stack_id = stack_info.get('stack_id')
                stack_launch_name = stack_info.get('stack_name')

                # verify cloud provider DELETE* status for stack id
                stack_status = skybase.actions.skycloud.call_cloud_api(
                    planet=Planet(planet_name),
                    stack_name=stack_id,
                    action='get_stack_status')
                result['stack_status'][stack_id] = stack_status

                # skip stacks the provider has not started deleting
                if not stack_status.startswith('DELETE'):
                    continue

                # identify record source and archive destination
                src = os.path.join(db.db, recid.strip('/'))
                dst = os.path.join(db.archive, recid.strip('/'))

                # identify resources file
                srcfile = os.path.join(src, db.resources)
                dstfile = os.path.join(dst, db.resources)

                # make archive folder path and move the resources file there
                skybase.utils.mkdir_path(dst)
                shutil.move(srcfile, dstfile)
                result['archive'].append(recid)

                # clean-up non-empty planet registry directory tree from
                # bottom to top
                while recid:
                    # join database and key
                    target_dir = os.path.join(db.db, recid.strip('/'))

                    # attempt to remove bottom stack path directory;
                    # discontinue if not empty of file or other directory
                    try:
                        os.rmdir(target_dir)
                    except OSError as e:
                        # BUGFIX: the original tested ``if errno.ENOTEMPTY:``
                        # — the constant itself, which is always truthy — so
                        # every OSError (permissions, busy mount, ...) was
                        # silently swallowed and ``raise`` was unreachable.
                        # Only a genuinely non-empty directory should stop
                        # the pruning walk.
                        if e.errno == errno.ENOTEMPTY:
                            break
                        raise

                    # remove last element of stack path
                    tempdir = recid.split(os.path.sep)
                    tempdir.pop()
                    recid = os.path.sep.join(tempdir)

                # DECISION: need globally/app available execution mode
                # setting to be able to read from state db api. how?
                # TODO: need to acquire current service metadata from state
                # db service.
                # write service log entry for stack deployment
                service_log.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n'.format(
                    basic_timestamp(),
                    'DELETE',
                    service_name,
                    'TODO_METADATA_VERSION',
                    'TODO_METADATA_BUILD_ID',
                    tag,
                    stack_launch_name,
                    'TODO_METADATA_SOURCE_ARTIBALL'))

    return result
def write_service_state_record(planet_name, service_name, tag, registration, provider, stacks):
    """Create the planet-state DB records for a service deployment.

    Writes the service metadata and blueprint files, one cloud-resource
    file per stack, and a per-stack deployment entry in the service log.

    :param planet_name: target planet identifier.
    :param service_name: deployed service name.
    :param tag: deployment tag.
    :param registration: dict with ``metadata`` (manifest + artiball
        source) and ``blueprint`` (original deployment yaml) keys.
    :param provider: cloud provider name recorded in each resource file.
    :param stacks: mapping of stack name -> stack launch info dict
        (expects a ``name`` key for the launch name).
    :returns: dict mapping stack name -> planet-state record id.
    """
    from skybase.service.state import ServiceRegistryMetadata, ServiceRegistryBlueprint, ServiceRegistryLog

    # connect to database
    db = PlanetStateDb()

    # define record format as list of directory names based on deployment
    planetdb_basepath = os.path.join(
        db.db,
        planet_name,
        service_name,
        tag,
    )

    response = dict()

    # make unique service directory and write service information to files
    mkdir_path(planetdb_basepath)

    # hoist the loop-invariant metadata lookup used in the log entries
    metadata = registration.get('metadata', {})

    # write service metadata (manifest + artiball source)
    metadata_path = os.path.join(planetdb_basepath, ServiceRegistryMetadata.FILENAME)
    with open(metadata_path, 'w') as f:
        f.write(yaml.safe_dump(registration.get('metadata'), default_flow_style=False))

    # write original main deployment yaml contents as 'blueprint'
    blueprint_path = os.path.join(planetdb_basepath, ServiceRegistryBlueprint.FILENAME)
    with open(blueprint_path, 'w') as f:
        f.write(yaml.safe_dump(registration.get('blueprint'), default_flow_style=False))

    # append deployment entries to the service registry log
    log_path = os.path.join(planetdb_basepath, ServiceRegistryLog.FILENAME)
    with open(log_path, 'a') as service_log:
        # create planet state record for each stack in db
        for stack_name, stack_info in stacks.items():
            # create unique path
            planetdb_stack_path = os.path.join(planetdb_basepath, stack_name)

            # make unique stack directory
            mkdir_path(planetdb_stack_path)

            # create planet state DB record filename
            planetdb_record = os.path.join(
                planetdb_stack_path,
                db.resources
            )

            # template for cloud resource file contents
            cloud_resource = {
                'cloud': {
                    'provider': provider,
                }
            }

            # merge stack information into template
            cloud_resource['cloud'].update(stack_info)

            # write stack launch information to resource file
            with open(planetdb_record, 'w') as f:
                f.write(yaml.safe_dump(
                    cloud_resource,
                    default_flow_style=False))

            # planet state response points to file
            response[stack_name] = prepare_record_id(db.db, db.resources, planetdb_record)

            # write service log entry for stack deployment
            # NOTE(review): entry is written per stack, inside the loop — it
            # reads stack_info['name'] for the launch name; confirm against
            # the registry log's expected cardinality.
            service_log.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n'.format(
                basic_timestamp(),
                'DEPLOY',
                service_name,
                metadata.get('app_version'),
                metadata.get('build_id'),
                tag,
                stack_info['name'],
                metadata.get('source_artiball')))

    return response