def while_waiting(self):
    """ Update fields periodically if nothing is happening """
    # give a little extra time for file descriptors to close
    time.sleep(0.1)
    self.addfield.value = Timestamp()
    self.addfield.display()
    self.addfield2.value = Uptime()
    self.addfield2.display()
    # query docker once instead of twice for the running-container count
    running = len(Containers())
    self.addfield3.value = str(running) + ' running'
    if running > 0:
        self.addfield3.labelColor = 'GOOD'
    else:
        self.addfield3.labelColor = 'DEFAULT'
    self.addfield3.display()
    # if file drop location changes deal with it
    logger = Logger(__name__)
    # look the drop location up once and reuse the result
    drop_path = DropLocation()[1]
    if self.file_drop.value != drop_path:
        logger.info('Starting: file drop restart')
        try:
            self.file_drop.value = drop_path
            logger.info('Path given: ' + str(self.file_drop.value))
        except Exception as e:  # pragma no cover
            logger.error('file drop restart failed with error: ' + str(e))
        logger.info('Finished: file drop restart')
    self.file_drop.display()
    return
def __init__(self, **kargs):
    """ Set up paths, the plugin helper, the docker client, and a logger """
    self.path_dirs = PathDirs(**kargs)
    self.p_helper = PluginHelper(**kargs)
    self.d_client = docker.from_env()
    self.logger = Logger(__name__)
    # manifest and plugin config both live under the resolved path dirs
    self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg')
    self.plugin_config_file = self.path_dirs.plugin_config_file
def __init__(self, *args, **kwargs):
    """ Resolve config/startup paths, connect to docker, then auto-install """
    self.path_dirs = PathDirs(**kwargs)
    self.d_client = docker.from_env()
    self.logger = Logger(__name__)
    self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg')
    self.vent_config = self.path_dirs.cfg_file
    self.startup_file = self.path_dirs.startup_file
    # must run last: depends on the attributes set above
    self._auto_install()
def __init__(self, version='HEAD', branch='master', user=None, pw=None,
             *args, **kwargs):
    """ Record repo coordinates and credentials, then prepare host paths """
    self.version = version
    self.branch = branch
    self.user = user
    self.pw = pw
    self.path_dirs = PathDirs(**kwargs)
    # make sure host-side directories/config exist before anything uses them
    self.path_dirs.host_config()
    self.d_client = docker.from_env()
    self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg')
    self.logger = Logger(__name__)
def __init__(self, action_dict=None, action_name=None, *args, **keywords):
    """ Build the action mapping for the inventory form and delegate to it """
    tools = Tools()
    action = {'api_action': tools}
    if action_dict:
        action.update(action_dict)
    InventoryForm.__init__(self, action, Logger(action_name),
                           *args, **keywords)
def __init__(self, *args, **keywords):
    """ Initialize tool form objects """
    self.logger = Logger(__name__)
    self.logger.info(str(keywords['names']))
    self.api_action = Action()
    self.m_helper = MenuHelper()
    self.tools_tc = {}
    self.repo_widgets = {}
    action = {'api_action': self.api_action}
    if keywords['action_dict']:
        action.update(keywords['action_dict'])
    # bind each named Action method as a numbered action object
    if keywords['names']:
        for i, name in enumerate(keywords['names'], start=1):
            action['action_object' + str(i)] = getattr(self.api_action, name)
    self.action = action
    # get list of all possible group views to display
    self.views = deque()
    possible_groups = set()
    manifest = Template(self.api_action.plugin.manifest)
    if self.action['cores']:
        tools = self.api_action.inventory(choices=['core'])[1]['core']
    else:
        tools = self.api_action.inventory(choices=['tools'])[1]['tools']
    for tool in tools:
        for group in manifest.option(tool, 'groups')[1].split(','):
            # don't do core because that's the purpose of all in views
            if group not in ('', 'core'):
                possible_groups.add(group)
    self.manifest = manifest
    self.views += possible_groups
    self.views.append('all groups')
    self.no_instance = ['build', 'remove']
    super(ToolForm, self).__init__(*args, **keywords)
def __init__(self, *args, **keywords):
    """ Initialize tool form objects """
    self.logger = Logger(__name__)
    self.api_action = System()
    self.tools_inst = Tools()
    self.tools_tc = {}
    self.repo_widgets = {}
    action = {'api_action': self.tools_inst}
    if keywords['action_dict']:
        action.update(keywords['action_dict'])
    # bind each named method as a numbered action object; prefer the Tools
    # implementation and fall back to System when Tools lacks it
    if keywords['names']:
        for i, name in enumerate(keywords['names'], start=1):
            try:
                action['action_object' + str(i)] = getattr(self.tools_inst,
                                                           name)
            except AttributeError:
                action['action_object' + str(i)] = getattr(self.api_action,
                                                           name)
    self.action = action
    # get list of all possible group views to display
    self.views = deque()
    possible_groups = set()
    manifest = Template(self.api_action.manifest)
    for tool in self.tools_inst.inventory(choices=['tools'])[1]['tools']:
        for group in manifest.option(tool, 'groups')[1].split(','):
            # don't do core because that's the purpose of all in views
            if group not in ('', 'core'):
                possible_groups.add(group)
    self.manifest = manifest
    self.views += possible_groups
    self.views.append('all groups')
    self.no_instance = ['remove']
    super(ToolForm, self).__init__(*args, **keywords)
def while_waiting(self):
    """ Update fields periodically if nothing is happening """
    # give a little extra time for file descriptors to close
    time.sleep(0.1)
    self.addfield.value = Timestamp()
    self.addfield.display()
    self.addfield2.value = Uptime()
    self.addfield2.display()
    # query docker once instead of twice for the running-container count
    running = len(Containers())
    self.addfield3.value = str(running) + ' running'
    if running > 0:
        self.addfield3.labelColor = 'GOOD'
    else:
        self.addfield3.labelColor = 'DEFAULT'
    self.addfield3.display()
    # update core tool status
    self.addfield5.value, values = MainForm.t_status(True)
    if values[0] + values[1] == 0:
        color = 'DANGER'
        self.addfield4.labelColor = 'CAUTION'
        self.addfield4.value = 'Idle'
    elif values[0] >= int(values[2]):
        color = 'GOOD'
        self.addfield4.labelColor = color
        self.addfield4.value = 'Ready to start jobs'
    else:
        color = 'CAUTION'
        self.addfield4.labelColor = color
        self.addfield4.value = 'Ready to start jobs'
    self.addfield5.labelColor = color
    # update plugin tool status
    plugin_str, values = MainForm.t_status(False)
    plugin_str += ', ' + str(values[3]) + ' plugin(s) installed'
    self.addfield6.value = plugin_str
    # get jobs
    jobs = Jobs()
    # number of jobs, number of tool containers
    self.addfield7.value = str(jobs[0]) + ' jobs running (' + str(jobs[1])
    self.addfield7.value += ' tool containers), ' + str(jobs[2])
    self.addfield7.value += ' completed jobs'
    if jobs[0] > 0:
        self.addfield4.labelColor = 'GOOD'
        self.addfield4.value = 'Processing jobs'
        self.addfield7.labelColor = 'GOOD'
    else:
        self.addfield7.labelColor = 'DEFAULT'
    self.addfield4.display()
    self.addfield5.display()
    self.addfield6.display()
    self.addfield7.display()
    # if file drop location changes deal with it
    logger = Logger(__name__)
    status = (False, None)
    # look the drop location up once and reuse the result
    drop_valid, drop_path = DropLocation()
    if self.file_drop.value != drop_path:
        logger.info('Starting: file drop restart')
        try:
            self.file_drop.value = drop_path
            logger.info('Path given: ' + str(self.file_drop.value))
            # restart if the path is valid
            if drop_valid:
                status = self.api_action.clean(name='file_drop')
                status = self.api_action.prep_start(name='file_drop')
            else:
                # fixed: message and path were concatenated with no separator
                logger.error('file drop path name invalid: ' + drop_path)
            if status[0]:
                tool_d = status[1]
                status = self.api_action.start(tool_d)
            logger.info('Status of file drop restart: ' + str(status[0]))
        except Exception as e:  # pragma no cover
            logger.error('file drop restart failed with error: ' + str(e))
        logger.info('Finished: file drop restart')
    self.file_drop.display()
    return
def __init__(self, manifest, *args, **kwargs):
    """ Hold on to the manifest, path dirs, docker client, and a logger """
    self.manifest = manifest
    self.path_dirs = PathDirs(**kwargs)
    self.logger = Logger(__name__)
    self.d_client = docker.from_env()
class Action:
    """ Handle actions in menu """

    def __init__(self, **kargs):
        self.plugin = Plugin(**kargs)
        self.d_client = self.plugin.d_client
        self.vent_config = os.path.join(self.plugin.path_dirs.meta_dir,
                                        "vent.cfg")
        self.logger = Logger(__name__)

    def add(self, repo, tools=None, overrides=None, version="HEAD",
            branch="master", build=True, user=None, pw=None, groups=None,
            version_alias=None, wild=None, remove_old=True, disable_old=True):
        """ Add a new set of tool(s); returns (bool, result) """
        self.logger.info("Starting: add")
        status = (True, None)
        try:
            status = self.plugin.add(repo, tools=tools, overrides=overrides,
                                     version=version, branch=branch,
                                     build=build, user=user, pw=pw,
                                     groups=groups,
                                     version_alias=version_alias,
                                     wild=wild, remove_old=remove_old,
                                     disable_old=disable_old)
        except Exception as e:  # pragma: no cover
            self.logger.error("add failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of add: " + str(status))
        self.logger.info("Finished: add")
        return status

    def remove(self, repo=None, namespace=None, name=None, groups=None,
               enabled="yes", branch="master", version="HEAD", built="yes"):
        """ Remove tools or a repo; returns (bool, result) """
        self.logger.info("Starting: remove")
        status = (True, None)
        try:
            status = self.plugin.remove(name=name, repo=repo,
                                        namespace=namespace, groups=groups,
                                        enabled=enabled, branch=branch,
                                        version=version, built=built)
        except Exception as e:
            self.logger.error("remove failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of remove: " + str(status))
        self.logger.info("Finished: remove")
        return status

    def prep_start(self, repo=None, name=None, groups=None, enabled="yes",
                   branch="master", version="HEAD", run_build=False):
        """
        Start a set of tools that match the parameters given, if no
        parameters are given, start all installed tools on the master branch
        at verison HEAD that are enabled.

        Returns (True, tool_dict) on success, (False, exception) on failure;
        tool_dict maps container name -> docker run/create keyword arguments.
        """
        args = locals()
        self.logger.info("Starting: prep_start")
        self.logger.info("Arguments: " + str(args))
        status = (True, None)
        tool_dict = {}
        try:
            # run_build is consumed here, not a constraint option
            del args['run_build']
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            vent_config = Template(template=self.vent_config)
            files = vent_config.option('main', 'files')
            sections, template = self.plugin.constraint_options(args, options)
            for section in sections:
                # initialize needed vars
                template_path = os.path.join(sections[section]['path'],
                                             'vent.template')
                container_name = sections[section]['image_name'].replace(':',
                                                                         '-')
                container_name = container_name.replace('/', '-')
                image_name = sections[section]['image_name']
                # checkout the right version and branch of the repo
                self.plugin.branch = branch
                self.plugin.version = version
                cwd = os.getcwd()
                self.logger.info("current directory is: " + str(cwd))
                os.chdir(os.path.join(sections[section]['path']))
                status = self.plugin.checkout()
                self.logger.info(status)
                os.chdir(cwd)
                if run_build:
                    status = self.build(name=sections[section]['name'],
                                        groups=groups, enabled=enabled,
                                        branch=branch, version=version)
                    self.logger.info(status)
                # set docker settings for container
                vent_template = Template(template_path)
                status = vent_template.section('docker')
                self.logger.info(status)
                tool_dict[container_name] = {'image': image_name,
                                             'name': container_name}
                if status[0]:
                    for option in status[1]:
                        # renamed from 'options' to avoid shadowing the
                        # constraint-option list above
                        opt_val = option[1]
                        # check for commands to evaluate
                        if '`' in opt_val:
                            cmds = opt_val.split('`')
                            # TODO this probably needs better error checking
                            # to handle mismatched ``
                            if len(cmds) > 2:
                                i = 1
                                while i < len(cmds):
                                    try:
                                        cmds[i] = subprocess.check_output(
                                            shlex.split(cmds[i]),
                                            stderr=subprocess.STDOUT,
                                            close_fds=True).strip()
                                    except Exception as e:  # pragma: no cover
                                        self.logger.error(
                                            "unable to evaluate command specified in vent.template: " + str(e))
                                    i += 2
                            opt_val = "".join(cmds)
                        # store options set for docker
                        try:
                            tool_dict[container_name][
                                option[0]] = ast.literal_eval(opt_val)
                        except Exception as e:  # pragma: no cover
                            # value wasn't a python literal; keep raw string
                            self.logger.error(
                                "unable to store the options set for docker: " + str(e))
                            tool_dict[container_name][option[0]] = opt_val
                # get temporary name for links, etc.
                status = vent_template.section('info')
                self.logger.info(status)
                plugin_config = Template(template=self.plugin.manifest)
                status, plugin_sections = plugin_config.sections()
                self.logger.info(status)
                for plugin_section in plugin_sections:
                    status = plugin_config.option(plugin_section, "link_name")
                    self.logger.info(status)
                    image_status = plugin_config.option(plugin_section,
                                                        "image_name")
                    self.logger.info(image_status)
                    if status[0] and image_status[0]:
                        cont_name = image_status[1].replace(':', '-')
                        cont_name = cont_name.replace('/', '-')
                        if cont_name not in tool_dict:
                            # placeholder entry so links can be resolved;
                            # not started unless requested elsewhere
                            tool_dict[cont_name] = {'image': image_status[1],
                                                    'name': cont_name,
                                                    'start': False}
                        tool_dict[cont_name]['tmp_name'] = status[1]
                # add extra labels
                if 'labels' not in tool_dict[container_name]:
                    tool_dict[container_name]['labels'] = {}
                tool_dict[container_name]['labels']['vent'] = Version()
                tool_dict[container_name]['labels'][
                    'vent.namespace'] = sections[section]['namespace']
                tool_dict[container_name]['labels']['vent.branch'] = branch
                tool_dict[container_name]['labels']['vent.version'] = version
                tool_dict[container_name]['labels'][
                    'vent.name'] = sections[section]['name']
                if 'groups' in sections[section]:
                    # add labels for groups
                    tool_dict[container_name]['labels'][
                        'vent.groups'] = sections[section]['groups']
                    # send logs to syslog
                    # NOTE(review): for a non-syslog core tool both of the
                    # following ifs match, so the 'plugin' tag overwrites
                    # the 'core' tag — looks unintended, confirm before
                    # changing
                    if ('syslog' not in sections[section]['groups'] and
                            'core' in sections[section]['groups']):
                        tool_dict[container_name]['log_config'] = {
                            'type': 'syslog',
                            'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                       'syslog-facility': 'daemon',
                                       'tag': 'core'}}
                    if 'syslog' not in sections[section]['groups']:
                        tool_dict[container_name]['log_config'] = {
                            'type': 'syslog',
                            'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                       'syslog-facility': 'daemon',
                                       'tag': 'plugin'}}
                    # mount necessary directories
                    if 'files' in sections[section]['groups']:
                        if 'volumes' in tool_dict[container_name]:
                            tool_dict[container_name]['volumes'][
                                self.plugin.path_dirs.base_dir[:-1]] = {
                                    'bind': '/vent', 'mode': 'ro'}
                        else:
                            tool_dict[container_name]['volumes'] = {
                                self.plugin.path_dirs.base_dir[:-1]: {
                                    'bind': '/vent', 'mode': 'ro'}}
                        if files[0]:
                            tool_dict[container_name]['volumes'][files[1]] = {
                                'bind': '/files', 'mode': 'ro'}
                else:
                    tool_dict[container_name]['log_config'] = {
                        'type': 'syslog',
                        'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                   'syslog-facility': 'daemon',
                                   'tag': 'plugin'}}
                # add label for priority
                status = vent_template.section('settings')
                self.logger.info(status)
                if status[0]:
                    for option in status[1]:
                        if option[0] == 'priority':
                            tool_dict[container_name]['labels'][
                                'vent.priority'] = option[1]
                # only start tools that have been built
                if sections[section]['built'] != 'yes':
                    del tool_dict[container_name]
            # check and update links, volumes_from, network_mode
            # NOTE(review): deleting from tool_dict while iterating .keys()
            # is only safe on python 2 (list copy); py3 would need
            # list(tool_dict.keys())
            for container in tool_dict.keys():
                if 'links' in tool_dict[container]:
                    for link in tool_dict[container]['links']:
                        for c in tool_dict.keys():
                            if ('tmp_name' in tool_dict[c] and
                                    tool_dict[c]['tmp_name'] == link):
                                tool_dict[container]['links'][
                                    tool_dict[c]['name']] = tool_dict[
                                        container]['links'].pop(link)
                if 'volumes_from' in tool_dict[container]:
                    tmp_volumes_from = tool_dict[container]['volumes_from']
                    tool_dict[container]['volumes_from'] = []
                    for volumes_from in list(tmp_volumes_from):
                        for c in tool_dict.keys():
                            if ('tmp_name' in tool_dict[c] and
                                    tool_dict[c]['tmp_name'] == volumes_from):
                                tool_dict[container]['volumes_from'].append(
                                    tool_dict[c]['name'])
                                tmp_volumes_from.remove(volumes_from)
                    tool_dict[container]['volumes_from'] += tmp_volumes_from
                if 'network_mode' in tool_dict[container]:
                    if tool_dict[container]['network_mode'].startswith(
                            'container:'):
                        network_c_name = tool_dict[container][
                            'network_mode'].split('container:')[1]
                        for c in tool_dict.keys():
                            if ('tmp_name' in tool_dict[c] and
                                    tool_dict[c]['tmp_name'] ==
                                    network_c_name):
                                tool_dict[container][
                                    'network_mode'] = ('container:' +
                                                       tool_dict[c]['name'])
            # remove tmp_names
            for c in tool_dict.keys():
                if 'tmp_name' in tool_dict[c]:
                    del tool_dict[c]['tmp_name']
            # remove containers that shouldn't be started
            for c in tool_dict.keys():
                if 'start' in tool_dict[c] and not tool_dict[c]['start']:
                    del tool_dict[c]
        except Exception as e:
            self.logger.error("prep_start failed with error: " + str(e))
            status = (False, e)
        else:
            # only report success when the try block completed; previously
            # this assignment ran after the handler and clobbered failures
            status = (True, tool_dict)
        self.logger.info("Status of prep_start: " + str(status))
        self.logger.info("Finished: prep_start")
        return status

    def start(self, tool_dict):
        """
        Start a set of tools that match the parameters given, if no
        parameters are given, start all installed tools on the master branch
        at verison HEAD that are enabled; returns (bool, result)
        """
        self.logger.info("Starting: start")
        status = (True, None)
        try:
            # check start priorities (priority of groups is alphabetical for
            # now)
            group_orders = {}
            groups = []
            containers_remaining = []
            for container in tool_dict:
                containers_remaining.append(container)
                if 'labels' in tool_dict[container]:
                    if 'vent.groups' in tool_dict[container]['labels']:
                        groups += tool_dict[container]['labels'][
                            'vent.groups'].split(',')
                        if 'vent.priority' in tool_dict[container]['labels']:
                            priorities = tool_dict[container]['labels'][
                                'vent.priority'].split(',')
                            container_groups = tool_dict[container]['labels'][
                                'vent.groups'].split(',')
                            for i, priority in enumerate(priorities):
                                if container_groups[i] not in group_orders:
                                    group_orders[container_groups[i]] = []
                                group_orders[container_groups[i]].append(
                                    (int(priority), container))
                            containers_remaining.remove(container)
            # start containers based on priorities
            groups = sorted(set(groups))
            started_containers = []
            for group in groups:
                if group in group_orders:
                    for container_tuple in sorted(group_orders[group]):
                        if container_tuple[1] not in started_containers:
                            started_containers.append(container_tuple[1])
                            try:
                                # try an existing container first; fall back
                                # to creating/running a new one
                                try:
                                    container = self.d_client.containers.get(
                                        container_tuple[1])
                                    container.start()
                                    self.logger.info(
                                        "started " + str(container_tuple[1]) +
                                        " with ID: " +
                                        str(container.short_id))
                                except Exception as err:  # pragma: no cover
                                    self.logger.error(str(err))
                                    container_id = (
                                        self.d_client.containers.run(
                                            detach=True,
                                            **tool_dict[container_tuple[1]]))
                                    self.logger.info(
                                        "started " + str(container_tuple[1]) +
                                        " with ID: " + str(container_id))
                            except Exception as e:  # pragma: no cover
                                self.logger.error(
                                    "failed to start " +
                                    str(container_tuple[1]) +
                                    " because: " + str(e))
            # start the rest of the containers that didn't have any
            # priorities set
            for container in containers_remaining:
                try:
                    try:
                        c = self.d_client.containers.get(container)
                        c.start()
                        self.logger.info("started " + str(container) +
                                         " with ID: " + str(c.short_id))
                    except Exception as err:  # pragma: no cover
                        self.logger.error(str(err))
                        container_id = self.d_client.containers.run(
                            detach=True, **tool_dict[container])
                        self.logger.info("started " + str(container) +
                                         " with ID: " + str(container_id))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to start " + str(container) +
                                      " because: " + str(e))
        except Exception as e:
            self.logger.error("start failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of start: " + str(status))
        self.logger.info("Finished: start")
        return status

    def update(self, repo=None, name=None, groups=None, enabled="yes",
               branch="master", version="HEAD"):
        """
        Update a set of tools that match the parameters given, if no
        parameters are given, updated all installed tools on the master
        branch at verison HEAD that are enabled; returns (bool, result)
        """
        args = locals()
        self.logger.info("Starting: update")
        self.logger.info(args)
        status = (True, None)
        try:
            options = ['path', 'image_name', 'image_id']
            sections, template = self.plugin.constraint_options(args, options)
            # get existing containers and images and states
            running_containers = Containers()
            built_images = Images()
            self.logger.info("running docker containers: " +
                             str(running_containers))
            self.logger.info("built docker images: " + str(built_images))
            # if repo, pull and build
            # if registry image, pull
            for section in sections:
                try:
                    cwd = os.getcwd()
                    self.logger.info("current working directory: " + str(cwd))
                    os.chdir(sections[section]['path'])
                    self.plugin.version = version
                    self.plugin.branch = branch
                    c_status = self.plugin.checkout()
                    self.logger.info(c_status)
                    try:
                        os.chdir(cwd)
                    except Exception as e:  # pragma: no cover
                        self.logger.error("unable to change directory: " +
                                          str(e))
                        pass
                    template = self.plugin.builder(
                        template, sections[section]['path'],
                        sections[section]['image_name'], section,
                        build=True, branch=branch, version=version)
                    self.logger.info(template)
                    # stop and remove old containers and images if image_id
                    # updated
                    # !! TODO
                    # start containers if they were running
                    # !! TODO
                    # TODO logging
                except Exception as e:  # pragma: no cover
                    self.logger.error("unable to update: " + str(section) +
                                      " because: " + str(e))
            template.write_config()
        except Exception as e:
            self.logger.error("update failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of update: " + str(status))
        self.logger.info("Finished: update")
        return status

    def stop(self, repo=None, name=None, groups=None, enabled="yes",
             branch="master", version="HEAD"):
        """
        Stop a set of tools that match the parameters given, if no
        parameters are given, stop all installed tools on the master branch
        at verison HEAD that are enabled; returns (bool, result)
        """
        args = locals()
        self.logger.info("Starting: stop")
        self.logger.info(args)
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random
            # names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                container_name = sections[section]['image_name'].replace(':',
                                                                         '-')
                container_name = container_name.replace('/', '-')
                try:
                    container = self.d_client.containers.get(container_name)
                    container.stop()
                    self.logger.info("stopped " + str(container_name))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to stop " +
                                      str(container_name) +
                                      " because: " + str(e))
        except Exception as e:
            self.logger.error("stop failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of stop: " + str(status))
        self.logger.info("Finished: stop")
        return status

    def clean(self, repo=None, name=None, groups=None, enabled="yes",
              branch="master", version="HEAD"):
        """
        Clean (stop and remove) a set of tools that match the parameters
        given, if no parameters are given, clean all installed tools on the
        master branch at verison HEAD that are enabled; returns (bool,
        result)
        """
        args = locals()
        self.logger.info("Starting: clean")
        self.logger.info(args)
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random
            # names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                container_name = sections[section]['image_name'].replace(':',
                                                                         '-')
                container_name = container_name.replace('/', '-')
                try:
                    container = self.d_client.containers.get(container_name)
                    container.remove(force=True)
                    self.logger.info("cleaned " + str(container_name))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to clean " +
                                      str(container_name) +
                                      " because: " + str(e))
        except Exception as e:
            self.logger.error("clean failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of clean: " + str(status))
        self.logger.info("Finished: clean")
        return status

    def build(self, repo=None, name=None, groups=None, enabled="yes",
              branch="master", version="HEAD"):
        """ Build a set of tools that match the parameters given """
        args = locals()
        self.logger.info("Starting: build")
        self.logger.info(args)
        status = (True, None)
        try:
            options = ['image_name', 'path']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                self.logger.info("Building " + str(section) + " ...")
                template = self.plugin.builder(
                    template, sections[section]['path'],
                    sections[section]['image_name'], section,
                    build=True, branch=branch, version=version)
            template.write_config()
        except Exception as e:
            self.logger.error("build failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of build: " + str(status))
        self.logger.info("Finished: build")
        return status

    def cores(self, action, branch="master"):
        """
        Supply action (install, build, start, stop, clean) for core tools
        """
        self.logger.info("Starting: cores")
        status = (True, None)
        try:
            self.logger.info("action provided: " + str(action))
            core = Core(branch=branch)
            if action in ["install", "build"]:
                tools = []
                plugins = Plugin(plugins_dir=".internals/plugins")
                plugins.version = 'HEAD'
                plugins.branch = branch
                plugins.apply_path('https://github.com/cyberreboot/vent')
                response = plugins.checkout()
                self.logger.info("status of plugin checkout " +
                                 str(response))
                matches = plugins._available_tools(groups='core')
                for match in matches:
                    tools.append((match[0], ''))
                status = plugins.add('https://github.com/cyberreboot/vent',
                                     tools=tools, branch=branch, build=False)
                self.logger.info("status of plugin add: " + str(status))
                plugin_config = Template(template=self.plugin.manifest)
                sections = plugin_config.sections()
                # point each core tool at its prebuilt image name
                for tool in core['normal']:
                    for section in sections[1]:
                        name = plugin_config.option(section, "name")
                        orig_branch = plugin_config.option(section, "branch")
                        namespace = plugin_config.option(section, "namespace")
                        version = plugin_config.option(section, "version")
                        if (name[1] == tool and orig_branch[1] == branch and
                                namespace[1] == "cyberreboot/vent" and
                                version[1] == "HEAD"):
                            plugin_config.set_option(
                                section, "image_name",
                                "cyberreboot/vent-" + tool + ":" + branch)
                plugin_config.write_config()
            if action == "build":
                plugin_config = Template(template=self.plugin.manifest)
                sections = plugin_config.sections()
                try:
                    for tool in core['normal']:
                        for section in sections[1]:
                            image_name = plugin_config.option(section,
                                                              "image_name")
                            if image_name[1] == ("cyberreboot/vent-" + tool +
                                                 ":" + branch):
                                try:
                                    # currently can't use docker-py because
                                    # it returns a 404 on pull so no way to
                                    # valid if it worked or didn't
                                    # image_id = self.d_client.images.pull(
                                    #     'cyberreboot/vent-'+tool,
                                    #     tag=branch)
                                    image_id = None
                                    output = subprocess.check_output(
                                        shlex.split(
                                            "docker pull cyberreboot/vent-" +
                                            tool + ":" + branch),
                                        stderr=subprocess.STDOUT)
                                    # NOTE(review): assumes py2 str output;
                                    # py3 check_output returns bytes
                                    for line in output.split('\n'):
                                        if line.startswith("Digest: sha256:"):
                                            image_id = line.split(
                                                "Digest: sha256:")[1][:12]
                                    if image_id:
                                        plugin_config.set_option(
                                            section, "built", "yes")
                                        plugin_config.set_option(
                                            section, "image_id", image_id)
                                        plugin_config.set_option(
                                            section, "last_updated",
                                            str(datetime.datetime.utcnow()) +
                                            " UTC")
                                        status = (True, "Pulled " + tool)
                                        self.logger.info(str(status))
                                    else:
                                        plugin_config.set_option(
                                            section, "built", "failed")
                                        plugin_config.set_option(
                                            section, "last_updated",
                                            str(datetime.datetime.utcnow()) +
                                            " UTC")
                                        status = (
                                            False,
                                            "Failed to pull image " +
                                            str(output.split('\n')[-1]))
                                        self.logger.error(str(status))
                                except Exception as e:  # pragma: no cover
                                    plugin_config.set_option(
                                        section, "built", "failed")
                                    plugin_config.set_option(
                                        section, "last_updated",
                                        str(datetime.datetime.utcnow()) +
                                        " UTC")
                                    status = (False,
                                              "Failed to pull image " +
                                              str(e))
                                    self.logger.error(str(status))
                except Exception as e:  # pragma: no cover
                    status = (False, "Failed to pull images " + str(e))
                    self.logger.error(str(status))
                plugin_config.write_config()
            elif action == "start":
                status = self.prep_start(groups="core", branch=branch)
                if status[0]:
                    tool_dict = status[1]
                    status = self.start(tool_dict)
            elif action == "stop":
                status = self.stop(groups="core", branch=branch)
            elif action == "clean":
                status = self.clean(groups="core", branch=branch)
        except Exception as e:
            self.logger.info("core failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of core: " + str(status))
        self.logger.info("Finished: core")
        return status

    @staticmethod
    def backup():
        # TODO
        return

    @staticmethod
    def restore():
        # TODO
        return

    @staticmethod
    def configure():
        # TODO
        # tools, core, etc.
        return

    @staticmethod
    def system_commands():
        # reset, upgrade, etc.
        return

    def logs(self, container_type=None, grep_list=None):
        """
        generically filter logs stored in log containers; returns (bool,
        dict of container name -> matching log lines)
        """
        self.logger.info("Starting: logs")
        status = (True, None)
        log_entries = {}
        try:
            containers = self.d_client.containers.list(
                all=True, filters={'label': 'vent'})
            self.logger.info("containers found: " + str(containers))
            if grep_list:
                compare_containers = containers
                if container_type:
                    try:
                        compare_containers = [
                            c for c in containers
                            if (container_type in
                                c.attrs['Config']['Labels']['vent.groups'])]
                    except Exception as e:  # pragma: no cover
                        self.logger.error(
                            "Unable to limit containers by container_type: " +
                            str(container_type) + " because: " + str(e))
                for expression in grep_list:
                    for container in compare_containers:
                        try:
                            # 'logs' stores each line which contains the
                            # expression
                            # NOTE(review): container.logs() returns bytes on
                            # py3; split("\n") assumes py2 str
                            logs = [log for log in
                                    container.logs().split("\n")
                                    if expression in log]
                            for log in logs:
                                if str(container.name) in log_entries:
                                    log_entries[
                                        str(container.name)].append(log)
                                else:
                                    log_entries[str(container.name)] = [log]
                        except Exception as e:  # pragma: no cover
                            self.logger.error("Unable to get logs for " +
                                              str(container.name) +
                                              " because: " + str(e))
            else:
                compare_containers = containers
                if container_type:
                    try:
                        compare_containers = [
                            c for c in containers
                            if (container_type in
                                c.attrs['Config']['Labels']['vent.groups'])]
                    except Exception as e:  # pragma: no cover
                        self.logger.error(
                            "Unable to limit containers by container_type: " +
                            str(container_type) + " because: " + str(e))
                for container in compare_containers:
                    try:
                        logs = container.logs().split("\n")
                        for log in logs:
                            if str(container.name) in log_entries:
                                log_entries[str(container.name)].append(log)
                            else:
                                log_entries[str(container.name)] = [log]
                    except Exception as e:  # pragma: no cover
                        self.logger.error("Unable to get logs for " +
                                          str(container.name) +
                                          " because: " + str(e))
            status = (True, log_entries)
        except Exception as e:
            self.logger.error("logs failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of logs: " + str(status))
        self.logger.info("Finished: logs")
        return status

    @staticmethod
    def help():
        # TODO
        return

    def inventory(self, choices=None):
        """ Return a dictionary of the inventory items and status """
        self.logger.info("Starting: inventory")
        status = (True, None)
        self.logger.info("choices specified: " + str(choices))
        try:
            # choices: repos, core, tools, images, built, running, enabled
            items = {'repos': [], 'core': [], 'tools': [], 'images': [],
                     'built': [], 'running': [], 'enabled': []}
            tools = self.plugin.tools()
            self.logger.info("found tools: " + str(tools))
            for choice in choices:
                for tool in tools:
                    try:
                        if choice == 'repos':
                            if 'repo' in tool:
                                if (tool['repo'] and
                                        tool['repo'] not in items[choice]):
                                    items[choice].append(tool['repo'])
                        elif choice == 'core':
                            if 'groups' in tool:
                                if 'core' in tool['groups']:
                                    items[choice].append((tool['section'],
                                                          tool['name']))
                        elif choice == 'tools':
                            items[choice].append((tool['section'],
                                                  tool['name']))
                        elif choice == 'images':
                            # TODO also check against docker
                            images = Images()
                            items[choice].append((tool['section'],
                                                  tool['name'],
                                                  tool['image_name']))
                        elif choice == 'built':
                            items[choice].append((tool['section'],
                                                  tool['name'],
                                                  tool['built']))
                        elif choice == 'running':
                            containers = Containers()
                            status = 'not running'
                            for container in containers:
                                image_name = tool['image_name'].rsplit(
                                    ":" + tool['version'], 1)[0]
                                image_name = image_name.replace(':', '-')
                                image_name = image_name.replace('/', '-')
                                if container[0] == image_name:
                                    status = container[1]
                            items[choice].append((tool['section'],
                                                  tool['name'], status))
                        elif choice == 'enabled':
                            items[choice].append((tool['section'],
                                                  tool['name'],
                                                  tool['enabled']))
                        else:
                            # unknown choice
                            pass
                    except Exception as e:  # pragma: no cover
                        self.logger.error(
                            "unable to grab information about tool: " +
                            str(tool) + " because: " + str(e))
                        pass
            status = (True, items)
        except Exception as e:
            self.logger.error("inventory failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of inventory: " + str(status))
        self.logger.info("Finished: inventory")
        return status
class UpdateToolsForm(npyscreen.ActionForm):
    """ For picking which tools to update """
    # class-level state shared across form instances:
    # tools_tc maps repo -> {tool label -> CheckBox widget}
    tools_tc = {}
    # guard so while_waiting only builds the widget list once
    triggered = 0
    logger = Logger(__name__)

    def create(self):
        """ Build the static header and key handlers for the form """
        self.add_handlers({"^T": self.change_forms, "^Q": self.quit})
        self.add(npyscreen.TitleText,
                 name='Select which tools to update (only plugin tools are shown):',
                 editable=False)

    def while_waiting(self):
        """ Update with current tools that are not cores """
        if not self.triggered:
            # i tracks the vertical screen position (rely) for added widgets
            i = 4
            api_action = Action()
            response = api_action.inventory(choices=['repos', 'tools', 'core'])
            if response[0]:
                inventory = response[1]
                for repo in inventory['repos']:
                    # the vent repo itself is not user-updatable here
                    if repo != 'https://github.com/cyberreboot/vent':
                        # last two path components: [org, repo]
                        repo_name = repo.rsplit("/", 2)[1:]
                        self.tools_tc[repo] = {}
                        title_text = self.add(npyscreen.TitleText,
                                              name='Plugin: ' + repo,
                                              editable=False,
                                              rely=i, relx=5)
                        title_text.display()
                        i += 1
                        for tool in inventory['tools']:
                            # tool[0] is the manifest section
                            # (repo:org:name:...:branch:version)
                            r_name = tool[0].split(":")
                            if (repo_name[0] == r_name[0] and
                                    repo_name[1] == r_name[1]):
                                core = False
                                for item in inventory['core']:
                                    if tool[0] == item[0]:
                                        core = True
                                t = tool[1]
                                if t == "":
                                    t = "/"
                                # only non-core (plugin) tools get a checkbox;
                                # label carries branch:version for on_ok to
                                # split back out
                                if not core:
                                    t += ":" + ":".join(
                                        tool[0].split(":")[-2:])
                                    self.tools_tc[repo][t] = self.add(
                                        npyscreen.CheckBox, name=t,
                                        value=True, relx=10)
                                    self.tools_tc[repo][t].display()
                                    i += 1
                        i += 2
            self.triggered = 1
        return

    def quit(self, *args, **kwargs):
        """ Return to the main form """
        self.parentApp.switchForm("MAIN")

    def on_ok(self):
        """ Take the tool selections and update them """
        def diff(first, second):
            """ Get the elements that exist in the first list and not in the second """
            second = set(second)
            return [item for item in first if item not in second]

        def popup(original, orig_type, thr, title):
            """ Start the thread and display a popup of info until the thread is finished """
            thr.start()
            info_str = ""
            while thr.is_alive():
                # show only containers/images that appeared since `original`
                if orig_type == 'containers':
                    info = diff(Containers(), original)
                elif orig_type == 'images':
                    info = diff(Images(), original)
                if info:
                    info_str = ""
                    for entry in info:
                        # TODO limit length of info_str to fit box
                        info_str += entry[0] + ": " + entry[1] + "\n"
                npyscreen.notify_wait(info_str, title=title)
                time.sleep(1)
            return

        original_containers = Containers()
        api_action = Action()
        for repo in self.tools_tc:
            for tool in self.tools_tc[repo]:
                self.logger.info(tool)
                if self.tools_tc[repo][tool].value:
                    t = tool
                    # "/" placeholder tools were labeled '/:branch:version'
                    if t.startswith('/:'):
                        t = " " + t[1:]
                    t = t.split(":")
                    # update each selected tool in a worker thread so the UI
                    # popup can poll progress
                    thr = threading.Thread(target=api_action.update,
                                           args=(),
                                           kwargs={'name': t[0],
                                                   'branch': t[1],
                                                   'version': t[2]})
                    popup(original_containers, "containers", thr,
                          'Please wait, updating containers...')
        npyscreen.notify_confirm("Done updating containers.",
                                 title='Updated containers')
        self.quit()

    def on_cancel(self):
        """ Exit the form without updating anything """
        self.quit()

    def change_forms(self, *args, **keywords):
        """ Toggles to main """
        change_to = "MAIN"
        # Tell the VentApp object to change forms.
        self.parentApp.change_form(change_to)
class Tools:
    """
    Manage vent tools: install (new), inventory, remove, start/stop
    containers, and query repositories for branches/commits/tools.
    Most methods return a (success_bool, payload) status tuple.
    """

    def __init__(self, version='HEAD', branch='master',
                 user=None, pw=None, *args, **kwargs):
        """ Set up docker client, paths, and the plugin manifest location """
        self.version = version
        self.branch = branch
        self.user = user
        self.pw = pw
        self.d_client = docker.from_env()
        self.path_dirs = PathDirs(**kwargs)
        self.path_dirs.host_config()
        self.manifest = join(self.path_dirs.meta_dir,
                             'plugin_manifest.cfg')
        self.logger = Logger(__name__)

    def new(self, tool_type, uri, tools=None, link_name=None,
            image_name=None, overrides=None, tag=None, registry=None,
            groups=None):
        """
        Add a new tool: either a raw docker image ('image') or tools from a
        git repository ('core' forces the vent repo). Returns the status
        tuple from Image.add or Repository.add.
        """
        try:
            # remove tools that are already installed from being added
            if isinstance(tools, list):
                # iterate backwards so removal doesn't skip entries
                i = len(tools) - 1
                while i >= 0:
                    tool = tools[i]
                    if tool[0].find('@') >= 0:
                        tool_name = tool[0].split('@')[-1]
                    else:
                        tool_name = tool[0].rsplit('/', 1)[-1]
                    constraints = {'name': tool_name,
                                   'repo': uri.split('.git')[0]}
                    prev_installed, _ = Template(template=self.manifest).constrain_opts(constraints, [])
                    # don't reinstall
                    if prev_installed:
                        tools.remove(tool)
                    i -= 1
                # an emptied list means "install everything" downstream
                if len(tools) == 0:
                    tools = None
        except Exception as e:  # pragma: no cover
            self.logger.error('Add failed with error: {0}'.format(str(e)))
            return (False, str(e))
        if tool_type == 'image':
            status = Image(self.manifest).add(uri, link_name, tag=tag,
                                              registry=registry,
                                              groups=groups)
        else:
            if tool_type == 'core':
                uri = 'https://github.com/cyberreboot/vent'
                core = True
            else:
                core = False
            status = Repository(self.manifest).add(uri, tools,
                                                   overrides=overrides,
                                                   version=self.version,
                                                   image_name=image_name,
                                                   branch=self.branch,
                                                   user=self.user,
                                                   pw=self.pw,
                                                   core=core)
        return status

    def configure(self, tool):
        """ Configure a tool — not yet implemented """
        # TODO
        return

    def inventory(self, choices=None):
        """ Return a dictionary of the inventory items and status """
        status = (True, None)
        if not choices:
            return (False, 'No choices made')
        try:
            # choices: repos, tools, images, built, running, enabled
            items = {'repos': [], 'tools': {}, 'images': {},
                     'built': {}, 'running': {}, 'enabled': {}}
            tools = Template(self.manifest).list_tools()
            for choice in choices:
                for tool in tools:
                    try:
                        if choice == 'repos':
                            if 'repo' in tool:
                                if (tool['repo'] and
                                        tool['repo'] not in items[choice]):
                                    items[choice].append(tool['repo'])
                        elif choice == 'tools':
                            items[choice][tool['section']] = tool['name']
                        elif choice == 'images':
                            # TODO also check against docker
                            items[choice][tool['section']] = tool['image_name']
                        elif choice == 'built':
                            items[choice][tool['section']] = tool['built']
                        elif choice == 'running':
                            containers = Containers()
                            # NOTE(review): temporarily shadows the outer
                            # status tuple with a string; restored below
                            status = 'not running'
                            for container in containers:
                                image_name = tool['image_name'] \
                                    .rsplit(':' + tool['version'], 1)[0]
                                image_name = image_name.replace(':', '-')
                                image_name = image_name.replace('/', '-')
                                self.logger.info('image_name: ' + image_name)
                                if container[0] == image_name:
                                    status = container[1]
                                elif container[0] == image_name + \
                                        '-' + tool['version']:
                                    status = container[1]
                            items[choice][tool['section']] = status
                        elif choice == 'enabled':
                            items[choice][tool['section']] = tool['enabled']
                        else:
                            # unknown choice
                            pass
                    except Exception as e:  # pragma: no cover
                        self.logger.error('Unable to grab info about tool: ' +
                                          str(tool) + ' because: ' + str(e))
            status = (True, items)
        except Exception as e:  # pragma: no cover
            self.logger.error(
                'Inventory failed with error: {0}'.format(str(e)))
            status = (False, str(e))
        return status

    def remove(self, repo, name):
        """
        Remove a tool: delete its container(s), image, and manifest
        section(s). `locals()` (repo/name) is used as the constraint set.
        """
        args = locals()
        status = (True, None)
        # get resulting dict of sections with options that match constraints
        template = Template(template=self.manifest)
        results, _ = template.constrain_opts(args, [])
        for result in results:
            response, image_name = template.option(result, 'image_name')
            name = template.option(result, 'name')[1]
            try:
                settings_dict = json.loads(template.option(result,
                                                           'settings')[1])
                instances = int(settings_dict['instances'])
            except Exception:
                # no settings/instances recorded — assume a single instance
                instances = 1
            try:
                # check for container and remove
                c_name = image_name.replace(':', '-').replace('/', '-')
                for i in range(1, instances + 1):
                    # instance 1 keeps the bare name; others get a suffix
                    container_name = c_name + str(i) if i != 1 else c_name
                    container = self.d_client.containers.get(container_name)
                    response = container.remove(v=True, force=True)
                    self.logger.info(
                        'Removing container: {0}'.format(container_name))
            except Exception as e:  # pragma: no cover
                self.logger.warning('Unable to remove the container: ' +
                                    container_name + ' because: ' + str(e))
            # check for image and remove
            try:
                response = None
                image_id = template.option(result, 'image_id')[1]
                response = self.d_client.images.remove(image_id, force=True)
                self.logger.info('Removing image: ' + image_name)
            except Exception as e:  # pragma: no cover
                self.logger.warning('Unable to remove the image: ' +
                                    image_name + ' because: ' + str(e))
            # remove tool from the manifest
            for i in range(1, instances + 1):
                res = result.rsplit(':', 2)
                res[0] += str(i) if i != 1 else ''
                res = ':'.join(res)
                if template.section(res)[0]:
                    status = template.del_section(res)
                    self.logger.info('Removing tool: ' + res)
        # TODO if all tools from a repo have been removed, remove the repo
        template.write_config()
        return status

    def start(self, repo, name, is_tool_d=False):
        """
        Start tool containers. If is_tool_d, `repo` is already a prepared
        tool_d dict; otherwise _prep_start builds one from repo/name.
        """
        if is_tool_d:
            tool_d = repo
        else:
            args = locals()
            del args['self']
            del args['is_tool_d']
            tool_d = {}
            tool_d.update(self._prep_start(**args)[1])
        status = (True, None)
        try:
            # check start priorities (priority of groups alphabetical for now)
            group_orders = {}
            groups = []
            containers_remaining = []
            username = getpass.getuser()
            # remove tools that have the hidden label
            tool_d_copy = copy.deepcopy(tool_d)
            for container in tool_d_copy:
                if 'labels' in tool_d_copy[container] and 'vent.groups' in tool_d_copy[container]['labels']:
                    groups_copy = tool_d_copy[container]['labels']['vent.groups'].split(',')
                    if 'hidden' in groups_copy:
                        del tool_d[container]
            for container in tool_d:
                containers_remaining.append(container)
                # NOTE(review): the first placeholder in this format string
                # appears to have been replaced with a literal '******'
                # (only {1} is filled; username goes unused) — confirm intent
                self.logger.info(
                    "User: '******' starting container: {1}".format(username,
                                                                    container))
                if 'labels' in tool_d[container]:
                    if 'vent.groups' in tool_d[container]['labels']:
                        groups += tool_d[container]['labels']['vent.groups'].split(',')
                        if 'vent.priority' in tool_d[container]['labels']:
                            # priorities pair up positionally with groups
                            priorities = tool_d[container]['labels']['vent.priority'].split(',')
                            container_groups = tool_d[container]['labels']['vent.groups'].split(',')
                            for i, priority in enumerate(priorities):
                                if container_groups[i] not in group_orders:
                                    group_orders[container_groups[i]] = []
                                group_orders[container_groups[i]].append(
                                    (int(priority), container))
                            # prioritized containers start in the first pass
                            containers_remaining.remove(container)
                    tool_d[container]['labels'].update(
                        {'started-by': username})
                else:
                    tool_d[container].update(
                        {'labels': {'started-by': username}})
            # start containers based on priorities
            p_results = self._start_priority_containers(groups,
                                                        group_orders,
                                                        tool_d)
            # start the rest of the containers that didn't have any priorities
            r_results = self._start_remaining_containers(
                containers_remaining, tool_d)
            results = (p_results[0] + r_results[0],
                       p_results[1] + r_results[1])
            if len(results[1]) > 0:
                status = (False, results)
            else:
                status = (True, results)
        except Exception as e:  # pragma: no cover
            self.logger.error('Start failed with error: {0}'.format(str(e)))
            status = (False, str(e))
        return status

    def _prep_start(self, repo, name):
        """
        Build the tool_d container-spec dict for matching tools: resolve
        links/volumes_from/network_mode between tools, apply
        external-services overrides, and drop tools that shouldn't start.
        """
        args = locals()
        status = (True, None)
        try:
            options = ['name', 'namespace', 'built', 'groups',
                       'path', 'image_name', 'branch', 'repo',
                       'type', 'version']
            vent_config = Template(template=self.path_dirs.cfg_file)
            manifest = Template(self.manifest)
            files = vent_config.option('main', 'files')
            files = (files[0], expanduser(files[1]))
            s, _ = manifest.constrain_opts(args, options)
            status, tool_d = self._start_sections(s, files)
            # look out for links to delete because they're defined externally
            links_to_delete = set()
            # get instances for each tool
            tool_instances = {}
            sections = manifest.sections()[1]
            for section in sections:
                settings = manifest.option(section, 'settings')
                if settings[0]:
                    settings = json.loads(settings[1])
                    if 'instances' in settings:
                        l_name = manifest.option(section, 'link_name')
                        if l_name[0]:
                            tool_instances[l_name[1]] = int(
                                settings['instances'])
            # check and update links, volumes_from, network_mode
            for container in list(tool_d.keys()):
                # non-core containers get auto-removed when they exit
                if 'labels' not in tool_d[container] or 'vent.groups' not in tool_d[container]['labels'] or 'core' not in tool_d[container]['labels']['vent.groups']:
                    tool_d[container]['remove'] = True
                if 'links' in tool_d[container]:
                    for link in list(tool_d[container]['links'].keys()):
                        # add links to external services already running if
                        # necessary, by default configure local services too
                        configure_local = True
                        ext = 'external-services'
                        if link in vent_config.options(ext)[1]:
                            try:
                                lconf = json.loads(vent_config.option(ext,
                                                                      link)[1])
                                if ('locally_active' not in lconf or
                                        lconf['locally_active'] == 'no'):
                                    ip_adr = lconf['ip_address']
                                    port = lconf['port']
                                    tool_d[container]['extra_hosts'] = {}
                                    # containers use lowercase names for
                                    # connections
                                    tool_d[container]['extra_hosts'][link.lower()] = ip_adr
                                    # create an environment variable for
                                    # container to access port later
                                    env_variable = link.upper() + \
                                        '_CUSTOM_PORT=' + port
                                    if 'environment' not in tool_d[container]:
                                        tool_d[container]['environment'] = []
                                    tool_d[container]['environment'].append(
                                        env_variable)
                                    # remove the entry from links because no
                                    # longer connecting to local container
                                    links_to_delete.add(link)
                                    configure_local = False
                            except Exception as e:  # pragma: no cover
                                self.logger.error(
                                    'Could not load external settings because: {0}'.format(str(e)))
                                configure_local = True
                                # NOTE(review): status becomes a bare bool
                                # here (it's a tuple elsewhere) — the final
                                # `if status:` still works, but confirm
                                status = False
                        if configure_local:
                            # rewrite the link target from tmp_name to the
                            # actual container name
                            for c in list(tool_d.keys()):
                                if ('tmp_name' in tool_d[c] and
                                        tool_d[c]['tmp_name'] == link):
                                    tool_d[container]['links'][tool_d[c]['name']] = tool_d[container]['links'].pop(link)
                                    if link in tool_instances and tool_instances[link] > 1:
                                        # link each extra numbered instance
                                        for i in range(2, tool_instances[link] + 1):
                                            tool_d[container]['links'][tool_d[c]['name'] + str(i)] = tool_d[container]['links'][tool_d[c]['name']] + str(i)
                if 'volumes_from' in tool_d[container]:
                    # translate tmp_names in volumes_from to real names
                    tmp_volumes_from = tool_d[container]['volumes_from']
                    tool_d[container]['volumes_from'] = []
                    for volumes_from in list(tmp_volumes_from):
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and
                                    tool_d[c]['tmp_name'] == volumes_from):
                                tool_d[container]['volumes_from'].append(
                                    tool_d[c]['name'])
                                tmp_volumes_from.remove(volumes_from)
                    tool_d[container]['volumes_from'] += tmp_volumes_from
                if 'network_mode' in tool_d[container]:
                    if tool_d[container]['network_mode'].startswith('container:'):
                        network_c_name = tool_d[container]['network_mode'].split('container:')[1]
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and
                                    tool_d[c]['tmp_name'] == network_c_name):
                                tool_d[container]['network_mode'] = 'container:' + \
                                    tool_d[c]['name']
            # remove tmp_names
            for c in list(tool_d.keys()):
                if 'tmp_name' in tool_d[c]:
                    del tool_d[c]['tmp_name']
            # remove links section if all were externally configured
            for c in list(tool_d.keys()):
                if 'links' in tool_d[c]:
                    for link in links_to_delete:
                        if link in tool_d[c]['links']:
                            del tool_d[c]['links'][link]
                    # delete links if no more defined
                    if not tool_d[c]['links']:
                        del tool_d[c]['links']
            # remove containers that shouldn't be started
            for c in list(tool_d.keys()):
                deleted = False
                if 'start' in tool_d[c] and not tool_d[c]['start']:
                    del tool_d[c]
                    deleted = True
                if not deleted:
                    # look for tools services that are being done externally
                    # tools are capitalized in vent.cfg, so make them lowercase
                    # for comparison
                    ext = 'external-services'
                    external_tools = vent_config.section(ext)[1]
                    name = tool_d[c]['labels']['vent.name']
                    for tool in external_tools:
                        if name == tool[0].lower():
                            try:
                                tool_config = json.loads(tool[1])
                                if ('locally_active' in tool_config and
                                        tool_config['locally_active'] == 'no'):
                                    del tool_d[c]
                            except Exception as e:  # pragma: no cover
                                self.logger.warning('Locally running container ' +
                                                    name + ' may be redundant')
            if status:
                status = (True, tool_d)
            else:
                status = (False, tool_d)
        except Exception as e:  # pragma: no cover
            self.logger.error('_prep_start failed with error: '+str(e))
            status = (False, e)
        return status

    def _start_sections(self, s, files):
        """
        Translate each manifest section into a docker-py run() kwargs dict,
        applying vent.template docker options, labels, syslog log_config,
        group-specific mounts, and priority labels. Returns (status, tool_d).
        """
        tool_d = {}
        status = (True, None)
        for section in s:
            # initialize needed vars
            c_name = s[section]['image_name'].replace(':', '-')
            c_name = c_name.replace('/', '-')
            # numbered instances carry their suffix into the container name
            instance_num = re.search(r'\d+$', s[section]['name'])
            if instance_num:
                c_name += instance_num.group()
            image_name = s[section]['image_name']
            # checkout the right version and branch of the repo
            tool_d[c_name] = {'image': image_name,
                              'name': c_name}
            # get rid of all commented sections in various runtime
            # configurations
            manifest = Template(self.manifest)
            overall_dict = {}
            for setting in ['info', 'docker', 'gpu', 'settings', 'service']:
                option = manifest.option(section, setting)
                if option[0]:
                    overall_dict[setting] = {}
                    settings_dict = json.loads(option[1])
                    for opt in settings_dict:
                        if not opt.startswith('#'):
                            overall_dict[setting][opt] = settings_dict[opt]
            if 'docker' in overall_dict:
                options_dict = overall_dict['docker']
                for option in options_dict:
                    options = options_dict[option]
                    # check for commands to evaluate
                    if '`' in options:
                        cmds = options.split('`')
                        if len(cmds) > 2:
                            # odd-indexed segments are backquoted commands
                            i = 1
                            while i < len(cmds):
                                try:
                                    cmds[i] = check_output(shlex.split(cmds[i]),
                                                           stderr=STDOUT,
                                                           close_fds=True).strip().decode('utf-8')
                                except Exception as e:  # pragma: no cover
                                    self.logger.error(
                                        'unable to evaluate command specified in vent.template: ' + str(e))
                                i += 2
                        options = ''.join(cmds)
                    # check for commands to evaluate
                    # store options set for docker
                    try:
                        tool_d[c_name][option] = ast.literal_eval(options)
                    except Exception as e:  # pragma: no cover
                        self.logger.debug(
                            'Unable to literal_eval: {0}'.format(str(options)))
                        tool_d[c_name][option] = options
            if 'labels' not in tool_d[c_name]:
                tool_d[c_name]['labels'] = {}
            # get the service uri info
            if 'service' in overall_dict:
                try:
                    options_dict = overall_dict['service']
                    for option in options_dict:
                        tool_d[c_name]['labels'][option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store service options for '
                                      'docker: ' + str(e))
            # check for gpu settings
            if 'gpu' in overall_dict:
                try:
                    # NOTE(review): json.loads(status[1]) looks wrong —
                    # status is (True, None) or a sections tuple here;
                    # presumably overall_dict['gpu'] was intended and the
                    # except silently masks the failure. Confirm before fixing.
                    options_dict = json.loads(status[1])
                    for option in options_dict:
                        tool_d[c_name]['labels']['gpu.' +
                                                 option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store gpu options for '
                                      'docker: ' + str(e))
            # get temporary name for links, etc.
            plugin_c = Template(template=self.manifest)
            status, plugin_sections = plugin_c.sections()
            for plugin_section in plugin_sections:
                status = plugin_c.option(plugin_section, 'link_name')
                image_status = plugin_c.option(plugin_section, 'image_name')
                if status[0] and image_status[0]:
                    cont_name = image_status[1].replace(':', '-')
                    cont_name = cont_name.replace('/', '-')
                    if cont_name not in tool_d:
                        # placeholder entry so links can resolve; not started
                        tool_d[cont_name] = {'image': image_status[1],
                                             'name': cont_name,
                                             'start': False}
                    tool_d[cont_name]['tmp_name'] = status[1]
            # add extra labels
            tool_d[c_name]['labels']['vent'] = Version()
            tool_d[c_name]['labels']['vent.namespace'] = s[section]['namespace']
            tool_d[c_name]['labels']['vent.branch'] = s[section]['branch']
            tool_d[c_name]['labels']['vent.version'] = s[section]['version']
            tool_d[c_name]['labels']['vent.name'] = s[section]['name']
            tool_d[c_name]['labels']['vent.section'] = section
            tool_d[c_name]['labels']['vent.repo'] = s[section]['repo']
            tool_d[c_name]['labels']['vent.type'] = s[section]['type']
            # check for log_config settings in external-services
            externally_configured = False
            vent_config = Template(self.path_dirs.cfg_file)
            for ext_tool in vent_config.section('external-services')[1]:
                if ext_tool[0].lower() == 'syslog':
                    try:
                        log_dict = json.loads(ext_tool[1])
                        # configure if not locally active
                        if ('locally_active' not in log_dict or
                                log_dict['locally_active'] == 'no'):
                            del log_dict['locally_active']
                            log_config = {}
                            log_config['type'] = 'syslog'
                            log_config['config'] = {}
                            ip_address = ''
                            port = ''
                            for option in log_dict:
                                if option == 'ip_address':
                                    ip_address = log_dict[option]
                                elif option == 'port':
                                    port = log_dict['port']
                            syslog_address = 'tcp://' + ip_address + ':' + port
                            syslog_config = {'syslog-address': syslog_address,
                                             'syslog-facility': 'daemon',
                                             'tag': '{{.Name}}'}
                            log_config['config'].update(syslog_config)
                            externally_configured = True
                    except Exception as e:  # pragma: no cover
                        self.logger.error('external settings for log_config'
                                          " couldn't be stored because: " + str(e))
                        externally_configured = False
            if not externally_configured:
                # default: ship logs to the local syslog container
                log_config = {'type': 'syslog',
                              'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                         'syslog-facility': 'daemon',
                                         'tag': '{{.Name}}'}}
            if 'groups' in s[section]:
                # add labels for groups
                tool_d[c_name]['labels']['vent.groups'] = s[section]['groups']
                # add restart=always to core containers
                if 'core' in s[section]['groups']:
                    tool_d[c_name]['restart_policy'] = {'Name': 'always'}
                # map network names to environment variables
                if 'network' in s[section]['groups']:
                    vent_config = Template(template=self.path_dirs.cfg_file)
                    nic_mappings = vent_config.section('network-mapping')
                    nics = ''
                    if nic_mappings[0]:
                        for nic in nic_mappings[1]:
                            nics += nic[0] + ':' + nic[1] + ','
                        nics = nics[:-1]
                    if nics:
                        if 'environment' in tool_d[c_name]:
                            tool_d[c_name]['environment'].append(
                                'VENT_NICS='+nics)
                        else:
                            tool_d[c_name]['environment'] = ['VENT_NICS='+nics]
                # send logs to syslog
                if ('syslog' not in s[section]['groups'] and
                        'core' in s[section]['groups']):
                    log_config['config']['tag'] = '{{.Name}}'
                    tool_d[c_name]['log_config'] = log_config
                if 'syslog' not in s[section]['groups']:
                    tool_d[c_name]['log_config'] = log_config
                # mount necessary directories
                if 'files' in s[section]['groups']:
                    ulimits = []
                    ulimits.append(docker.types.Ulimit(name='nofile',
                                                       soft=1048576,
                                                       hard=1048576))
                    tool_d[c_name]['ulimits'] = ulimits
                    # check if running in a docker container
                    if 'VENT_CONTAINERIZED' in environ and environ['VENT_CONTAINERIZED'] == 'true':
                        if 'volumes_from' in tool_d[c_name]:
                            tool_d[c_name]['volumes_from'].append(
                                environ['HOSTNAME'])
                        else:
                            tool_d[c_name]['volumes_from'] = [
                                environ['HOSTNAME']]
                    else:
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][self.path_dirs.base_dir[:-1]] = {'bind': '/vent', 'mode': 'ro'}
                        else:
                            tool_d[c_name]['volumes'] = {
                                self.path_dirs.base_dir[:-1]: {'bind': '/vent',
                                                               'mode': 'ro'}}
                    if files[0]:
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][files[1]] = {
                                'bind': '/files', 'mode': 'rw'}
                        else:
                            tool_d[c_name]['volumes'] = {
                                files[1]: {'bind': '/files', 'mode': 'rw'}}
            else:
                tool_d[c_name]['log_config'] = log_config
            # add label for priority
            if 'settings' in overall_dict:
                try:
                    options_dict = overall_dict['settings']
                    for option in options_dict:
                        if option == 'priority':
                            tool_d[c_name]['labels']['vent.priority'] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store settings options '
                                      'for docker ' + str(e))
            # only start tools that have been built
            if s[section]['built'] != 'yes':
                del tool_d[c_name]
            # store section information for adding info to manifest later
            else:
                tool_d[c_name]['section'] = section
        return status, tool_d

    def _start_priority_containers(self, groups, group_orders, tool_d):
        """ Select containers based on priorities to start """
        vent_cfg = Template(self.path_dirs.cfg_file)
        cfg_groups = vent_cfg.option('groups', 'start_order')
        if cfg_groups[0]:
            cfg_groups = cfg_groups[1].split(',')
        else:
            cfg_groups = []
        all_groups = sorted(set(groups))
        s_conts = []
        f_conts = []
        # start tools in order of group defined in vent.cfg
        for group in cfg_groups:
            # remove from all_groups because already checked out
            if group in all_groups:
                all_groups.remove(group)
            if group in group_orders:
                # sorted() orders by (priority, container) tuples
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self._start_container(cont_t[1],
                                                                 tool_d,
                                                                 s_conts,
                                                                 f_conts)
        # start tools that haven't been specified in the vent.cfg, if any
        for group in all_groups:
            if group in group_orders:
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self._start_container(cont_t[1],
                                                                 tool_d,
                                                                 s_conts,
                                                                 f_conts)
        return (s_conts, f_conts)

    def _start_remaining_containers(self, containers_remaining, tool_d):
        """ Select remaining containers that didn't have priorities to start """
        s_containers = []
        f_containers = []
        for container in containers_remaining:
            s_containers, f_containers = self._start_container(container,
                                                               tool_d,
                                                               s_containers,
                                                               f_containers)
        return (s_containers, f_containers)

    def _start_container(self, container, tool_d, s_containers, f_containers):
        """ Start container that was passed in and return status """
        # use section to add info to manifest
        section = tool_d[container]['section']
        del tool_d[container]['section']
        manifest = Template(self.manifest)
        try:
            # try to start an existing container first
            c = self.d_client.containers.get(container)
            c.start()
            s_containers.append(container)
            manifest.set_option(section, 'running', 'yes')
            self.logger.info('started ' + str(container) +
                             ' with ID: ' + str(c.short_id))
        except Exception as err:
            # no existing container — create and run a new one
            s_containers, f_containers = self._run_container(
                container, tool_d, section, s_containers, f_containers)
        # save changes made to manifest
        manifest.write_config()
        return s_containers, f_containers

    def _run_container(self, container, tool_d, section,
                       s_containers, f_containers):
        """
        Create and run a new container from tool_d[container], applying
        nvidia-docker-plugin volume/device options when gpu.enabled is set.
        Appends to s_containers/f_containers and records 'running' in the
        manifest.
        """
        manifest = Template(self.manifest)
        try:
            gpu = 'gpu.enabled'
            failed = False
            if (gpu in tool_d[container]['labels'] and
                    tool_d[container]['labels'][gpu] == 'yes'):
                vent_config = Template(template=self.path_dirs.cfg_file)
                port = ''
                host = ''
                result = vent_config.option('nvidia-docker-plugin', 'port')
                if result[0]:
                    port = result[1]
                else:
                    port = '3476'
                result = vent_config.option('nvidia-docker-plugin', 'host')
                if result[0]:
                    host = result[1]
                else:
                    # now just requires ip, ifconfig
                    try:
                        route = check_output(('ip', 'route')).decode(
                            'utf-8').split('\n')
                        default = ''
                        # grab the default network device.
                        for device in route:
                            if 'default' in device:
                                default = device.split()[4]
                                break
                        # grab the IP address for the default device
                        ip_addr = check_output(
                            ('ifconfig', default)).decode('utf-8')
                        ip_addr = ip_addr.split('\n')[1].split()[1]
                        host = ip_addr
                    except Exception as e:  # pragma no cover
                        self.logger.error('failed to grab ip. Ensure that \
ip and ifconfig are installed')
                # ask the nvidia-docker plugin for the cli options to apply
                nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                params = {'vol': 'nvidia_driver'}
                r = requests.get(nd_url, params=params)
                if r.status_code == 200:
                    options = r.text.split()
                    for option in options:
                        if option.startswith('--volume-driver='):
                            tool_d[container]['volume_driver'] = option.split('=', 1)[1]
                        elif option.startswith('--volume='):
                            vol = option.split('=', 1)[1].split(':')
                            if 'volumes' in tool_d[container]:
                                if isinstance(tool_d[container]['volumes'],
                                              list):
                                    if len(vol) == 2:
                                        c_vol = vol[0] + \
                                            ':' + vol[1] + ':rw'
                                    else:
                                        c_vol = vol[0] + ':' + \
                                            vol[1] + ':' + vol[2]
                                    tool_d[container]['volumes'].append(
                                        c_vol)
                                else:  # Dictionary
                                    tool_d[container]['volumes'][vol[0]] = {'bind': vol[1],
                                                                            'mode': vol[2]}
                            else:
                                tool_d[container]['volumes'] = {vol[0]: {'bind': vol[1],
                                                                         'mode': vol[2]}}
                        elif option.startswith('--device='):
                            dev = option.split('=', 1)[1]
                            if 'devices' in tool_d[container]:
                                tool_d[container]['devices'].append(dev +
                                                                    ':' +
                                                                    dev +
                                                                    ':rwm')
                            else:
                                tool_d[container]['devices'] = [
                                    dev + ':' + dev + ':rwm']
                        else:
                            self.logger.error('Unable to parse ' +
                                              'nvidia-docker option: ' +
                                              str(option))
                else:
                    failed = True
                    f_containers.append(container)
                    manifest.set_option(section, 'running', 'failed')
                    self.logger.error('failed to start ' + str(container) +
                                      ' because nvidia-docker-plugin ' +
                                      'failed with: ' + str(r.status_code))
            if not failed:
                try:
                    # remove a stale container of the same name, if any
                    self.d_client.containers.remove(container, force=True)
                    self.logger.info(
                        'removed old existing container: ' + str(container))
                except Exception as e:
                    pass
                cont_id = self.d_client.containers.run(detach=True,
                                                       **tool_d[container])
                s_containers.append(container)
                manifest.set_option(section, 'running', 'yes')
                self.logger.info('started ' + str(container) +
                                 ' with ID: ' + str(cont_id))
        except Exception as e:  # pragma: no cover
            f_containers.append(container)
            manifest.set_option(section, 'running', 'failed')
            self.logger.error('failed to start ' + str(container) +
                              ' because: ' + str(e))
        return s_containers, f_containers

    def stop(self, repo, name):
        """ Stop the running containers for tools matching repo/name """
        args = locals()
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random
            #    names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups',
                       'path', 'image_name', 'branch', 'version']
            s, _ = Template(template=self.manifest).constrain_opts(args,
                                                                   options)
            for section in s:
                container_name = s[section]['image_name'].replace(':', '-')
                container_name = container_name.replace('/', '-')
                try:
                    container = self.d_client.containers.get(container_name)
                    container.stop()
                    self.logger.info('Stopped {0}'.format(str(container_name)))
                except Exception as e:  # pragma: no cover
                    self.logger.error('Failed to stop ' + str(container_name) +
                                      ' because: ' + str(e))
        except Exception as e:  # pragma: no cover
            self.logger.error('Stop failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_commits(self, repo):
        """ Get the commit IDs for all of the branches of a repository """
        commits = []
        try:
            status = self.path_dirs.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. Exiting repo_commits with'
                                  ' status: ' + str(status))
                return status
            status = self.repo_branches(repo)
            if status[0]:
                branches = status[1]
                for branch in branches:
                    try:
                        branch_output = check_output(shlex
                                                     .split('git rev-list origin/' + branch),
                                                     stderr=STDOUT,
                                                     close_fds=True).decode('utf-8')
                        branch_output = branch_output.split('\n')[:-1]
                        branch_output += ['HEAD']
                        commits.append((branch, branch_output))
                    except Exception as e:  # pragma: no cover
                        self.logger.error('repo_commits failed with error: ' +
                                          str(e) + ' on branch: ' +
                                          str(branch))
                        status = (False, e)
                        return status
            else:
                self.logger.error('repo_branches failed. Exiting repo_commits'
                                  ' with status: ' + str(status))
                return status
            # restore the working directory
            chdir(cwd)
            status = (True, commits)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_commits failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_branches(self, repo):
        """ Get the branches of a repository """
        branches = []
        try:
            # switch to directory where repo will be cloned to
            status = self.path_dirs.apply_path(repo)
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. Exiting repo_branches'
                                  ' with status ' + str(status))
                return status
            branch_output = check_output(shlex.split('git branch -a'),
                                         stderr=STDOUT,
                                         close_fds=True)
            branch_output = branch_output.split(b'\n')
            for branch in branch_output:
                br = branch.strip()
                # drop the '* ' marker on the current branch
                if br.startswith(b'*'):
                    br = br[2:]
                if b'/' in br:
                    branches.append(br.rsplit(b'/', 1)[1].decode('utf-8'))
                elif br:
                    branches.append(br.decode('utf-8'))
            branches = list(set(branches))
            for branch in branches:
                try:
                    # ensure each branch is checked out locally
                    check_output(shlex.split('git checkout ' + branch),
                                 stderr=STDOUT,
                                 close_fds=True)
                except Exception as e:  # pragma: no cover
                    self.logger.error('repo_branches failed with error: ' +
                                      str(e) + ' on branch: ' + str(branch))
                    status = (False, e)
                    return status
            # restore the working directory
            chdir(cwd)
            status = (True, branches)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_branches failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_tools(self, repo, branch, version):
        """ Get available tools for a repository branch at a version """
        try:
            tools = []
            status = self.path_dirs.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. Exiting repo_tools with'
                                  ' status: ' + str(status))
                return status
            # TODO commenting out for now, should use update_repo
            #status = self.p_helper.checkout(branch=branch, version=version)
            status = (True, None)
            if status[0]:
                path, _, _ = self.path_dirs.get_path(repo)
                tools = AvailableTools(path, version=version)
            else:
                self.logger.error('checkout failed. Exiting repo_tools with'
                                  ' status: ' + str(status))
                return status
            # restore the working directory
            chdir(cwd)
            status = (True, tools)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_tools failed with error: ' + str(e))
            status = (False, e)
        return status
class RmqEs():
    """
    Opens a connection to RabbitMQ and receives messages based on the
    provided binding keys, then takes those messages and sends them to
    an Elasticsearch index.

    The routing key's second dotted component is used as both the
    Elasticsearch index name and the document type.
    """

    # class-level defaults; real values are filled in by __init__ and
    # connections()
    es_conn = None
    es_host = None
    # get custom set port or else use default port
    es_port = int(os.getenv("ELASTICSEARCH_CUSTOM_PORT", 9200))
    rmq_host = None
    # get custom set port or else use default port
    rmq_port = int(os.getenv("RABBITMQ_CUSTOM_PORT", 5672))
    channel = None
    queue_name = None

    def __init__(self, es_host="elasticsearch", rmq_host="rabbitmq"):
        """ Initialize host information """
        self.es_host = es_host
        self.rmq_host = rmq_host
        self.logger = Logger(__name__)

    def connections(self, wait):
        """
        Wait for connections to both RabbitMQ and Elasticsearch to be made
        before binding a routing key to a channel and sending messages to
        Elasticsearch.

        Args:
            wait: truthy to keep retrying until the connection succeeds
        """
        while wait:
            try:
                params = pika.ConnectionParameters(host=self.rmq_host,
                                                   port=self.rmq_port)
                connection = pika.BlockingConnection(params)
                self.channel = connection.channel()
                self.channel.exchange_declare(exchange='topic_recs',
                                              exchange_type='topic')
                # exclusive queue is auto-deleted when the connection closes
                result = self.channel.queue_declare(exclusive=True)
                self.queue_name = result.method.queue
                self.es_conn = Elasticsearch([{'host': self.es_host,
                                               'port': self.es_port}])
                wait = False
                print("connected to rabbitmq...")
            except Exception as e:  # pragma: no cover
                # single message (previously the error was printed twice)
                print("waiting for connection to rabbitmq..." + str(e))
                time.sleep(2)
                wait = True

    def callback(self, ch, method, properties, body):
        """
        Callback triggered on RabbitMQ message received; parses the body
        and sends it to an Elasticsearch index named after the second
        dotted component of the routing key.
        """
        index = method.routing_key.split(".")[1]
        # pika delivers the body as bytes under Python 3; decode before
        # attempting to parse it as a Python literal
        if isinstance(body, bytes):
            body = body.decode('utf-8', errors='replace')
        failed = False
        try:
            doc = ast.literal_eval(body)
        except Exception:  # pragma: no cover
            try:
                # not a literal: wrap the raw text as a log entry. Embedded
                # double quotes must be escaped so the wrapped string stays
                # parseable (the previous replace('"', '\"') was a no-op
                # because '\"' == '"')
                body = body.strip().replace('"', '\\"')
                body = '{"log":"' + body + '"}'
                doc = ast.literal_eval(body)
            except Exception:  # pragma: no cover
                failed = True
        if not failed:
            try:
                self.es_conn.index(index=index,
                                   doc_type=index,
                                   id=method.routing_key + "." +
                                   str(uuid.uuid4()),
                                   body=doc)
            except Exception as e:  # pragma: no cover
                self.logger.error("Connection failed " + str(e))

    def start(self):
        """
        Start the channel listener and bind the routing keys supplied on
        the command line; exits if no binding keys were given.
        """
        self.connections(True)
        binding_keys = sys.argv[1:]
        if not binding_keys:
            # previously sys.stderr was passed as a positional argument to
            # print() (a Python 2 leftover), which printed the stream object
            # to stdout; write the usage message to stderr properly
            print("Usage: {0!s} [binding_key]...".format(sys.argv[0]),
                  file=sys.stderr)
            sys.exit(0)
        for binding_key in binding_keys:
            self.channel.queue_bind(exchange='topic_recs',
                                    queue=self.queue_name,
                                    routing_key=binding_key)

    def consume(self):  # pragma: no cover
        """ Start consuming RabbitMQ messages """
        print(' [*] Waiting for logs. To exit press CTRL+C')
        self.channel.basic_consume(self.callback,
                                   queue=self.queue_name,
                                   no_ack=True)
        self.channel.start_consuming()
class ToolForm(npyscreen.ActionForm):
    """
    Tools form for the Vent CLI.

    Presents a checkbox list of tools grouped by repository and performs
    the action described by ``keywords['action_dict']`` (start, stop,
    remove, update, configure, ...) on the selected tools when OK is
    pressed.
    """

    def __init__(self, *args, **keywords):
        """
        Initialize tool form objects.

        Expects ``keywords`` to contain 'action_dict' (action metadata such
        as 'action', 'action_name', 'type', 'cores') and 'names' (method
        names resolved against Tools, falling back to System) — both keys
        are accessed directly and must be present, even if falsy.
        """
        self.logger = Logger(__name__)
        self.api_action = System()
        self.tools_inst = Tools()
        action = {'api_action': self.tools_inst}
        # tools_tc maps repo -> {tool section -> CheckBox widget}
        self.tools_tc = {}
        self.repo_widgets = {}
        if keywords['action_dict']:
            action.update(keywords['action_dict'])
        if keywords['names']:
            i = 1
            for name in keywords['names']:
                # prefer the Tools implementation; fall back to System
                try:
                    action['action_object' + str(i)] = getattr(self.tools_inst,
                                                               name)
                except AttributeError:
                    action['action_object' + str(i)] = getattr(self.api_action,
                                                               name)
                i += 1
        self.action = action
        # get list of all possible group views to display
        self.views = deque()
        possible_groups = set()
        manifest = Template(self.api_action.manifest)
        tools = self.tools_inst.inventory(choices=['tools'])[1]['tools']
        for tool in tools:
            groups = manifest.option(tool, 'groups')[1].split(',')
            for group in groups:
                # don't do core because that's the purpose of all in views
                if group != '' and group != 'core':
                    possible_groups.add(group)
        self.manifest = manifest
        self.views += possible_groups
        self.views.append('all groups')
        # actions for which per-instance tools are hidden (see create())
        self.no_instance = ['remove']
        super(ToolForm, self).__init__(*args, **keywords)

    def quit(self, *args, **kwargs):
        """ Overridden to switch back to MAIN form """
        self.parentApp.switchForm('MAIN')

    def toggle_view(self, *args, **kwargs):
        """
        Toggles the view between different groups.

        Rotates the deque of group names: pops the next group, shows only
        tools belonging to it (or all tools for 'all groups'), then pushes
        the group back so repeated invocations cycle through every view.
        """
        group_to_display = self.views.popleft()
        self.cur_view.value = group_to_display
        for repo in self.tools_tc:
            for tool in self.tools_tc[repo]:
                t_groups = self.manifest.option(tool, 'groups')[1]
                # NOTE(review): substring membership test — a group name
                # that is a substring of another (e.g. 'net' in 'network')
                # would also match; confirm group names are disjoint
                if group_to_display not in t_groups and \
                        group_to_display != 'all groups':
                    self.tools_tc[repo][tool].value = False
                    self.tools_tc[repo][tool].hidden = True
                else:
                    self.tools_tc[repo][tool].value = True
                    self.tools_tc[repo][tool].hidden = False
        # redraw elements
        self.display()
        # add view back to queue
        self.views.append(group_to_display)

    def create(self, group_view=False):
        """
        Update with current tools.

        Builds the form widgets: a title, an optional group-view selector
        for togglable actions, and one checkbox per tool under a title per
        repository. `i` tracks the vertical placement (rely) of widgets.
        """
        self.add_handlers({'^T': self.quit, '^Q': self.quit})
        self.add(npyscreen.TitleText,
                 name='Select which tools to ' + self.action['action'] + ':',
                 editable=False)
        togglable = ['remove']
        if self.action['action_name'] in togglable:
            self.cur_view = self.add(npyscreen.TitleText,
                                     name='Group view:',
                                     value='all groups', editable=False,
                                     rely=3)
            self.add_handlers({'^V': self.toggle_view})
            i = 5
        else:
            i = 4
        # 'start' additionally needs build/run state to list candidates
        if self.action['action_name'] == 'start':
            response = self.tools_inst.inventory(choices=['repos', 'tools',
                                                          'built', 'running'])
        else:
            response = self.tools_inst.inventory(choices=['repos', 'tools'])
        if response[0]:
            inventory = response[1]
            repos = inventory['repos']
            # dict has repo as key and list of core/non-core tools as values
            has_core = {}
            has_non_core = {}
            # find all tools that are in this repo
            # and list them if they are core
            for repo in repos:
                # NOTE(review): core_list is never appended to in this
                # method, so the 'cores' branch below always sees an empty
                # list — confirm whether core detection was lost here
                core_list = []
                ncore_list = []
                # splice the repo names for processing
                if (repo.startswith('http')):
                    repo_name = repo.rsplit('/', 2)[1:]
                else:
                    repo_name = repo.split('/')
                for tool in inventory['tools']:
                    tool_repo_name = tool.split(':')
                    # cross reference repo names
                    if (repo_name[0] == tool_repo_name[0] and
                            repo_name[1] == tool_repo_name[1]):
                        # check to ensure tool not set to locally active = no
                        # in vent.cfg
                        externally_active = False
                        vent_cfg_file = self.api_action.vent_config
                        vent_cfg = Template(vent_cfg_file)
                        tool_pairs = vent_cfg.section('external-services')[1]
                        for ext_tool in tool_pairs:
                            if ext_tool[0].lower() == inventory['tools'][tool]:
                                try:
                                    ext_tool_options = json.loads(ext_tool[1])
                                    loc = 'locally_active'
                                    if (loc in ext_tool_options and
                                            ext_tool_options[loc] == 'no'):
                                        externally_active = True
                                except Exception as e:
                                    self.logger.error("Couldn't check ext"
                                                      ' because: ' + str(e))
                                    externally_active = False
                        manifest = Template(self.api_action.manifest)
                        if not externally_active:
                            # a trailing digit in the manifest name marks an
                            # additional instance of a tool
                            instance_num = re.search(r'\d+$',
                                                     manifest.option(
                                                         tool, 'name')[1])
                            if not instance_num:
                                ncore_list.append(tool)
                            # multiple instances share same image
                            elif self.action['action_name'] not in self.no_instance:
                                ncore_list.append(tool)
                has_core[repo] = core_list
                has_non_core[repo] = ncore_list
            for repo in repos:
                self.tools_tc[repo] = {}
                if self.action['cores']:
                    # make sure only repos with core tools are displayed
                    if has_core.get(repo):
                        self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                           name='Plugin: '+repo,
                                                           editable=False,
                                                           rely=i, relx=5)
                        for tool in has_core[repo]:
                            tool_name = tool.split(':', 2)[2].split('/')[-1]
                            if tool_name == '':
                                tool_name = '/'
                            self.tools_tc[repo][tool] = self.add(
                                npyscreen.CheckBox, name=tool_name,
                                value=True, relx=10)
                            i += 1
                        i += 3
                else:
                    # make sure only repos with non-core tools are displayed
                    if has_non_core.get(repo):
                        self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                           name='Plugin: '+repo,
                                                           editable=False,
                                                           rely=i, relx=5)
                        for tool in has_non_core[repo]:
                            tool_name = tool.split(':', 2)[2].split('/')[-1]
                            if tool_name == '':
                                tool_name = '/'
                            self.tools_tc[repo][tool] = self.add(
                                npyscreen.CheckBox, name=tool_name,
                                value=True, relx=10)
                            i += 1
                        i += 3
        return

    def on_ok(self):
        """
        Take the tool selections and perform the provided action on them.

        Long-running actions are dispatched on a worker thread while a
        popup shows container/image changes; 'configure' instead queues
        editor forms and chains to the last one created.
        """
        def diff(first, second):
            """
            Get the elements that exist in the first list and not in the
            second
            """
            second = set(second)
            return [item for item in first if item not in second]

        def popup(original, orig_type, thr, title):
            """
            Start the thread and display a popup of info until the thread
            is finished
            """
            thr.start()
            info_str = ''
            while thr.is_alive():
                # show what containers/images appeared since the action
                # started (assumes orig_type is 'containers' or 'images';
                # anything else would leave `info` unbound)
                if orig_type == 'containers':
                    info = diff(Containers(), original)
                elif orig_type == 'images':
                    info = diff(Images(), original)
                if info:
                    info_str = ''
                    for entry in info:
                        info_str = entry[0] + ': ' + entry[1] + '\n' + info_str
                if self.action['action_name'] != 'configure':
                    npyscreen.notify_wait(info_str, title=title)
                    time.sleep(1)
            thr.join()
            # report per-container success/failure if the worker queued a
            # (status, (running, failed)) result; best-effort otherwise
            try:
                result = self.api_action.queue.get(False)
                if isinstance(result, tuple) and isinstance(result[1], tuple):
                    running, failed = result[1]
                    r_str = ''
                    for container in running:
                        r_str += container + ': successful\n'
                    for container in failed:
                        r_str += container + ': failed\n'
                    npyscreen.notify_confirm(r_str)
            except Exception as e:  # pragma: no cover
                pass
            return

        # snapshot current state so popup() can diff against it
        if self.action['type'] == 'images':
            originals = Images()
        else:
            originals = Containers()

        # NOTE(review): tool_d is never used in this method
        tool_d = {}
        # destructive/long actions get an extra confirmation dialog
        if self.action['action_name'] in ['remove', 'stop', 'update']:
            reconfirmation_str = ''
            if self.action['cores']:
                reconfirmation_str = 'Are you sure you want to '
                reconfirmation_str += self.action['action_name']
                reconfirmation_str += ' core containers?'
            else:
                reconfirmation_str = 'Are you sure you want to '
                reconfirmation_str += self.action['action_name']
                reconfirmation_str += ' plugin containers?'
            perform = npyscreen.notify_ok_cancel(reconfirmation_str,
                                                 title='Confirm command')
            if not perform:
                return
        tools_to_configure = []
        for repo in self.tools_tc:
            for tool in self.tools_tc[repo]:
                if self.tools_tc[repo][tool].value:
                    # tool section looks like org:repo:path:branch:version;
                    # t becomes [name, branch, version]
                    t = tool.split(':', 2)[2].split('/')[-1]
                    if t.startswith('/:'):
                        t = ' '+t[1:]
                    t = t.split(':')
                    if self.action['action_name'] in ['start', 'stop']:
                        status = self.action['action_object1'](repo, t[0])
                    elif self.action['action_name'] == 'configure':
                        constraints = {'name': t[0],
                                       'branch': t[1],
                                       'version': t[2],
                                       'repo': repo}
                        options = ['type']
                        action = self.action['api_action']
                        tool = self.manifest.constrain_opts(constraints,
                                                            options)[0]
                        # only one tool should be returned
                        name = list(tool.keys())[0]
                        if tool[name]['type'] == 'registry':
                            registry_image = True
                        else:
                            registry_image = False
                        kargs = {'name': 'Configure ' + t[0],
                                 'tool_name': t[0],
                                 'branch': t[1],
                                 'version': t[2],
                                 'repo': repo,
                                 'next_tool': None,
                                 'get_configure': self.api_action.get_configure,
                                 'save_configure': self.api_action.save_configure,
                                 'restart_tools': self.api_action.restart_tools,
                                 'start_tools': action.start,
                                 'from_registry': registry_image}
                        # chain editor forms so each one knows its successor
                        if tools_to_configure:
                            kargs['next_tool'] = tools_to_configure[-1]
                        self.parentApp.addForm('EDITOR' + t[0], EditorForm,
                                               **kargs)
                        tools_to_configure.append('EDITOR' + t[0])
                    elif self.action['action_name'] == 'remove':
                        status = self.action['action_object1'](repo, t[0])
                    else:
                        kargs = {'name': t[0],
                                 'branch': t[1],
                                 'version': t[2]}
                        # add core recognition
                        if self.action['cores']:
                            kargs.update({'groups': 'core'})
                        # use latest version for update, not necessarily
                        # version in manifest
                        if self.action['action_name'] == 'update':
                            if t[2] != 'HEAD':
                                repo_commits = self.tools_inst.repo_commits(repo)[1]
                                for branch in repo_commits:
                                    if branch[0] == t[1]:
                                        kargs.update(
                                            {'new_version': branch[1][0]})
                            else:
                                kargs.update({'new_version': 'HEAD'})
                        thr = Thread(target=self.action['action_object1'],
                                     args=(),
                                     kwargs=kargs)
                        popup(originals, self.action['type'], thr,
                              'Please wait, ' + self.action['present_t'] +
                              '...')
        if self.action['action_name'] != 'configure':
            npyscreen.notify_confirm('Done ' + self.action['present_t'] +
                                     '.',
                                     title=self.action['past_t'])
            self.quit()
        else:
            # jump into the last editor form; its next_tool chain walks
            # back through the rest
            if len(tools_to_configure) > 0:
                self.parentApp.change_form(tools_to_configure[-1])
            else:
                npyscreen.notify_confirm('No tools selected, returning to'
                                         ' main menu',
                                         title='No action taken')
                self.quit()

    def on_cancel(self):
        """ When user clicks cancel, will return to MAIN """
        self.quit()
class Plugin: """ Handle Plugins """ def __init__(self, **kargs): self.path_dirs = PathDirs(**kargs) self.manifest = os.path.join(self.path_dirs.meta_dir, "plugin_manifest.cfg") self.d_client = docker.from_env() self.logger = Logger(__name__) def apply_path(self, repo): """ Set path to where the repo is and return original path """ self.logger.info("Starting: apply_path") self.logger.info("repo given: "+str(repo)) status = (True, None) try: # rewrite repo for consistency if repo.endswith(".git"): repo = repo.split(".git")[0] # get org and repo name and path repo will be cloned to org, name = repo.split("/")[-2:] self.path = os.path.join(self.path_dirs.plugins_dir, org, name) self.logger.info("cloning to path: "+str(self.path)) # save current path cwd = os.getcwd() # set to new repo path os.chdir(self.path) status = (True, cwd) except Exception as e: self.logger.error("apply_path failed with error: "+str(e)) status = (False, e) self.logger.info("Status of apply_path: "+str(status)) self.logger.info("Finished: apply_path") return status def repo_branches(self, repo): """ Get the branches of a repository """ self.logger.info("Starting: repo_branches") self.logger.info("repo given: "+str(repo)) status = (True, None) branches = [] try: # switch to directory where repo will be cloned to status = self.apply_path(repo) if status[0]: cwd = status[1] else: self.logger.info("apply_path failed. 
Exiting repo_branches with status "+str(status)) return status junk = subprocess.check_output(shlex.split("git pull --all"), stderr=subprocess.STDOUT, close_fds=True) branch_output = subprocess.check_output(shlex.split("git branch -a"), stderr=subprocess.STDOUT, close_fds=True) branch_output = branch_output.split("\n") for branch in branch_output: b = branch.strip() if b.startswith('*'): b = b[2:] if "/" in b: branches.append(b.rsplit('/', 1)[1]) elif b: branches.append(b) branches = list(set(branches)) self.logger.info("branches found: "+str(branches)) for branch in branches: try: junk = subprocess.check_output(shlex.split("git checkout " + branch), stderr=subprocess.STDOUT, close_fds=True) except Exception as e: # pragma: no cover self.logger.error("repo_branches failed with error: "+str(e)+" on branch: "+str(branch)) status = (False, e) self.logger.info("Exiting repo_branches with status: "+str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: "+str(cwd)+"because: "+str(e)) status = (True, branches) except Exception as e: self.logger.error("repo_branches failed with error: "+str(e)) status = (False, e) self.logger.info("Status of repo_branches: "+str(status)) self.logger.info("Finished: repo_branches") return status def repo_commits(self, repo): """ Get the commit IDs for all of the branches of a repository """ self.logger.info("Starting: repo_commits") self.logger.info("repo given: "+str(repo)) status = (True, None) commits = [] try: status = self.apply_path(repo) # switch to directory where repo will be cloned to if status[0]: cwd = status[1] else: self.logger.info("apply_path failed. 
Exiting repo_commits with status: "+str(status)) return status status = self.repo_branches(repo) if status[0]: branches = status[1] for branch in branches: try: branch_output = subprocess.check_output(shlex.split("git rev-list " + branch), stderr=subprocess.STDOUT, close_fds=True) branch_output = ['HEAD'] + branch_output.split("\n")[:-1] commits.append((branch, branch_output)) except Exception as e: # pragma: no cover self.logger.error("repo_commits failed with error: "+str(e)+" on branch: "+str(branch)) status = (False, e) self.logger.info("Exiting repo_commits with status: "+str(status)) return status else: self.logger.info("repo_branches failed. Exiting repo_commits with status: "+str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: "+str(cwd)+" because: "+str(e)) status = (True, commits) except Exception as e: self.logger.error("repo_commits failed with error: "+str(e)) status = (False, e) self.logger.info("Status of repo_commits: "+str(status)) self.logger.info("Finished: repo_commits") return status def repo_tools(self, repo, branch, version): """ Get available tools for a repository branch at a version """ self.logger.info("Starting: repo_tools") self.logger.info("repo given: "+str(repo)) self.logger.info("branch given: "+str(branch)) self.logger.info("version given: "+str(version)) status = (True, None) try: tools = [] status = self.apply_path(repo) # switch to directory where repo will be cloned to if status[0]: cwd = status[1] else: self.logger.info("apply_path failed. Exiting repo_tools with status: "+str(status)) return status self.branch = branch self.version = version status = self.checkout() if status[0]: tools = self._available_tools() else: self.logger.info("checkout failed. 
Exiting repo_tools with status: "+str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: "+str(cwd)+" because: "+str(e)) status = (True, tools) except Exception as e: self.logger.error("repo_tools failed with error: "+str(e)) status = (False, e) self.logger.info("Status of repo_tools: "+str(status)) self.logger.info("Finished: repo_tools") return status def clone(self, repo, user=None, pw=None): """ Clone the repository """ self.logger.info("Starting: clone") self.logger.info("repo given: "+str(repo)) self.logger.info("user given: "+str(user)) status = (True, None) try: self.org = None self.name = None self.repo = repo # save current path cwd = os.getcwd() self.logger.info("current working directory: "+str(cwd)) # rewrite repo for consistency if self.repo.endswith(".git"): self.repo = self.repo.split(".git")[0] # get org and repo name and path repo will be cloned to self.org, self.name = self.repo.split("/")[-2:] self.logger.info("org name found: "+str(self.org)) self.logger.info("repo name found: "+str(self.name)) self.path = os.path.join(self.path_dirs.plugins_dir, self.org, self.name) self.logger.info("path to clone to: "+str(self.path)) # check if the directory exists, if so return now status = self.path_dirs.ensure_dir(self.path) if not status[0]: self.logger.info("ensure_dir failed. 
Exiting clone with status: "+str(status)) return status # set to new repo path os.chdir(self.path) # if path already exists, try git checkout to update if status[0] and status[1] == 'exists': try: response = subprocess.check_output(shlex.split("git -C "+self.path+" rev-parse"), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("path already exists: "+str(self.path)) status = (True, cwd) self.logger.info("Status of clone: "+str(status)) self.logger.info("Finished: clone") return status except Exception as e: # pragma: no cover self.logger.error("unable to checkout: "+str(path)+" because: "+str(e)) status = (False, e) self.logger.info("Exiting clone with status: "+str(status)) return status # ensure cloning still works even if ssl is broken...probably should be improved response = subprocess.check_output(shlex.split("git config --global http.sslVerify false"), stderr=subprocess.STDOUT, close_fds=True) # check if user and pw were supplied, typically for private repos if user and pw: # only https is supported when using user/pw repo = 'https://'+user+':'+pw+'@'+self.repo.split("https://")[-1] # clone repo and build tools response = subprocess.check_output(shlex.split("git clone --recursive " + repo + " ."), stderr=subprocess.STDOUT, close_fds=True) status = (True, cwd) except Exception as e: self.logger.error("clone failed with error: "+str(e)) status = (False, e) self.logger.info("Status of clone: "+str(status)) self.logger.info("Finished: clone") return status def add(self, repo, tools=None, overrides=None, version="HEAD", branch="master", build=True, user=None, pw=None, groups=None, version_alias=None, wild=None, remove_old=True, disable_old=True, limit_groups=None): """ Adds a plugin of tool(s) tools is a list of tuples, where the pair is a tool name (path to Dockerfile) and version tools are for explicitly limiting which tools and versions (if version in tuple is '', then defaults to version) overrides is a list of tuples, where the pair is a tool name 
(path to Dockerfile) and a version overrides are for explicitly removing tools and overriding versions of tools (if version in tuple is '', then tool is removed, otherwise that tool is checked out at the specific version in the tuple) if tools and overrides are left as empty lists, then all tools in the repo are pulled down at the version and branch specified or defaulted to version is globally set for all tools, unless overridden in tools or overrides branch is globally set for all tools build is a boolean of whether or not to build the tools now user is the username for a private repo if needed pw is the password to go along with the username for a private repo groups is globally set for all tools version_alias is globally set for all tools and is a mapping from a friendly version tag to the real version commit ID wild lets you specify individual overrides for additional values in the tuple of tools or overrides. wild is a list containing one or more of the following: branch, build, groups, version_alias the order of the items in the wild list will expect values to be tacked on in the same order to the tuple for tools and overrides in additional to the tool name and version remove_old lets you specify whether or not to remove previously found tools that match to ones being added currently (note does not stop currently running instances of the older version) disable_old lets you specify whether or not to disable previously found tools that match to ones being added currently (note does not stop currently running instances of the older version) limit_groups is a list of groups to build tools for that match group names in vent.template of each tool if exists Examples: repo=fe (get all tools from repo 'fe' at version 'HEAD' on branch 'master') repo=foo, version="3d1f", branch="foo" (get all tools from repo 'foo' at verion '3d1f' on branch 'foo') repo=foo, tools=[('bar', ''), ('baz', '1d32')] (get only 'bar' from repo 'foo' at version 'HEAD' on branch 'master' and 
'baz' from repo 'foo' at version '1d32' on branch 'master', ignore all other tools in repo 'foo') repo=foo overrides=[('baz/bar', ''), ('.', '1c4e')], version='4fad' (get all tools from repo 'foo' at verion '4fad' on branch 'master' except 'baz/bar' and for tool '.' get version '1c4e') repo=foo tools=[('bar', '1a2d')], overrides=[('baz', 'f2a1')] (not a particularly useful example, but get 'bar' from 'foo' at version '1a2d' and get 'baz' from 'foo' at version 'f2a1' on branch 'master', ignore all other tools) """ # initialize and store class objects self.tools = tools self.overrides = overrides self.version = version self.branch = branch self.build = build self.groups = groups # TODO these need to be implemented self.version_alias = version_alias self.wild = wild self.remove_old = remove_old self.disable_old = disable_old self.limit_groups = limit_groups status = (True, None) status_code, cwd = self.clone(repo, user=user, pw=pw) status = self._build_tools(status_code) # set back to original path try: os.chdir(cwd) except Exception as e: # pragma: no cover pass return status @ErrorHandler def builder(self, template, match_path, image_name, section, build=None, branch=None, version=None): """ Build tools """ self.logger.info("Starting: builder") self.logger.info("install path: "+str(match_path)) self.logger.info("image name: "+str(image_name)) self.logger.info("build: "+str(build)) self.logger.info("branch: "+str(branch)) self.logger.info("version: "+str(version)) if build: self.build = build elif not hasattr(self, 'build'): self.build = True if branch: self.branch = branch elif not hasattr(self, 'branch'): self.branch = 'master' if version: self.version = version elif not hasattr(self, 'version'): self.version = 'HEAD' cwd = os.getcwd() self.logger.info("current working directory: "+str(cwd)) try: os.chdir(match_path) except Exception as e: self.logger.error("unable to change to directory: "+str(match_path)+" because: "+str(e)) return None template = 
self._build_image(template, match_path, image_name, section) try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change to directory: "+str(cwd)+" because: "+str(e)) self.logger.info("template of builder: "+str(template)) self.logger.info("Finished: builder") return template def _build_tools(self, status): """ Create list of tools, paths, and versions to be built and sends them to build_manifest """ response = (True, None) # !! TODO implement features: wild, remove_old, disable_old, limit_groups # check result of clone, ensure successful or that it already exists if status: response = self.checkout() if response[0]: matches = [] if self.tools is None and self.overrides is None: # get all tools matches = self._available_tools() elif self.tools is None: # there's only something in overrides # grab all the tools then apply overrides matches = self._available_tools() # !! TODO apply overrides to matches elif self.overrides is None: # there's only something in tools # only grab the tools specified matches = self.get_tool_matches() else: # both tools and overrides were specified # grab only the tools specified, with the overrides applied orig_matches = self.get_tool_matches() matches = orig_matches for override in self.overrides: override_t = None if override[0] == '.': override_t = ('', override[1]) else: override_t = override for match in orig_matches: if override_t[0] == match[0]: matches.remove(match) matches.append(override_t) if len(matches) > 0: self._build_manifest(matches) else: response = (False, status) return response def get_tool_matches(self): """ Get the tools paths and versions that were specified by self.tools and self.version """ matches = [] if not hasattr(self, 'tools'): self.tools = [] if not hasattr(self, 'version'): self.version = 'HEAD' for tool in self.tools: match_version = self.version if tool[1] != '': match_version = tool[1] match = '' if tool[0].endswith('/'): match = tool[0][:-1] elif tool[0] != '.': 
match = tool[0] if not match.startswith('/') and match != '': match = '/'+match matches.append((match, match_version)) return matches def _build_manifest(self, matches): """ Builds and writes the manifest for the tools being added """ # !! TODO check for pre-existing that conflict with request and disable and/or remove image for match in matches: template = Template(template=self.manifest) # !! TODO check for special settings here first for the specific match self.version = match[1] response = self.checkout() if response[0]: section = self.org + ":" + self.name + ":" + match[0] + ":" + self.branch + ":" + self.version match_path = self.path + match[0] image_name = self.org + "-" + self.name + "-" if match[0] != '': # if tool is in a subdir, add that to the name of the image image_name += '-'.join(match[0].split('/')[1:]) + "-" image_name += self.branch + ":" + self.version # check if the section already exists exists, options = template.section(section) previous_commit = None previous_commits = None head = False if exists: for option in options: # TODO check if tool name but a different version exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # !! TODO # check if section should be removed from config - i.e. 
all tools, # but new commit removed one that was in a previous commit # set template section and options for tool at version and branch template.add_section(section) template.set_option(section, "name", match[0].split('/')[-1]) template.set_option(section, "namespace", self.org+'/'+self.name) template.set_option(section, "path", match_path) template.set_option(section, "repo", self.repo) template.set_option(section, "enabled", "yes") template.set_option(section, "branch", self.branch) template.set_option(section, "version", self.version) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") template.set_option(section, "image_name", image_name) vent_template = Template(template=os.path.join(match_path, 'vent.template')) vent_status, response = vent_template.option("info", "name") if vent_status: template.set_option(section, "link_name", response) else: template.set_option(section, "link_name", match[0].split('/')[-1]) commit_id = None if self.version == 'HEAD': os.chdir(match_path) commit_id = subprocess.check_output(shlex.split("git rev-parse --short HEAD"), stderr=subprocess.STDOUT, close_fds=True).strip() template.set_option(section, "commit_id", commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if previous_commits and previous_commit not in previous_commits: previous_commits = previous_commit+','+previous_commits elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, "previous_versions", previous_commits) if self.version_alias: template.set_option(section, "version_alias", self.version_alias) if self.groups: template.set_option(section, "groups", self.groups) else: vent_template = os.path.join(match_path, 'vent.template') if os.path.exists(vent_template): v_template = Template(template=vent_template) groups = 
v_template.option("info", "groups") if groups[0]: template.set_option(section, "groups", groups[1]) template = self._build_image(template, match_path, image_name, section) # write out configuration to the manifest file template.write_config() # reset to repo directory os.chdir(self.path) return def _build_image(self, template, match_path, image_name, section): """ Build docker images and store results in template """ # !! TODO return status of whether it built successfully or not if self.build: try: os.chdir(match_path) # currently can't use docker-py because it doesn't support labels on images yet name = template.option(section, "name") groups = template.option(section, "groups") if groups[1] == "" or not groups[0]: groups = (True, "none") if not name[0]: name = (True, image_name) # pull if '/' in image_name, fallback to build pull = False if '/' in image_name: try: self.logger.info("Trying to pull "+image_name) output = subprocess.check_output(shlex.split("docker pull "+image_name), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("Pulling "+name[1]+"\n"+str(output)) for line in output.split('\n'): if line.startswith("Digest: sha256:"): image_id = line.split("Digest: sha256:")[1][:12] if image_id: template.set_option(section, "built", "yes") template.set_option(section, "image_id", image_id) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (True, "Pulled "+image_name) self.logger.info(str(status)) else: template.set_option(section, "built", "failed") template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image "+str(output.split('\n')[-1])) self.logger.warning(str(status)) pull = True except Exception as e: # pragma: no cover self.logger.warning("Failed to pull image, going to build instead: "+str(e)) if not pull: output = subprocess.check_output(shlex.split("docker build --label vent --label vent.name="+name[1]+" --label 
vent.groups="+groups[1]+" -t " + image_name + " ."), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("Building "+name[1]+"\n"+str(output)) image_id = "" for line in output.split("\n"): if line.startswith("Successfully built "): image_id = line.split("Successfully built ")[1].strip() template.set_option(section, "built", "yes") template.set_option(section, "image_id", image_id) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") except Exception as e: # pragma: no cover self.logger.error("unable to build image: "+str(image_name)+" because: "+str(e)) template.set_option(section, "built", "failed") template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") else: template.set_option(section, "built", "no") template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") return template def _available_tools(self, groups=None): """ Return list of possible tools in repo for the given version and branch """ matches = [] if not hasattr(self, 'path'): return matches if groups: groups = groups.split(",") for root, dirnames, filenames in os.walk(self.path): for filename in fnmatch.filter(filenames, 'Dockerfile'): # !! TODO deal with wild/etc.? 
if groups: try: template = Template(template=os.path.join(root, 'vent.template')) for group in groups: template_groups = template.option("info", "groups") if template_groups[0] and group in template_groups[1]: matches.append((root.split(self.path)[1], self.version)) except Exception as e: # pragma: no cover pass else: matches.append((root.split(self.path)[1], self.version)) return matches def checkout(self): """ Checkout a specific version and branch of a repo """ if not hasattr(self, 'branch'): self.branch = 'master' if not hasattr(self, 'version'): self.version = 'HEAD' response = (True, None) try: status = subprocess.check_output(shlex.split("git checkout " + self.branch), stderr=subprocess.STDOUT, close_fds=True) status = subprocess.check_output(shlex.split("git pull"), stderr=subprocess.STDOUT, close_fds=True) status = subprocess.check_output(shlex.split("git reset --hard " + self.version), stderr=subprocess.STDOUT, close_fds=True) response = (True, status) except Exception as e: # pragma: no cover response = (False, os.getcwd()+str(e)) return response @staticmethod def add_image(image, tag="latest"): """ Add an image from a registry/hub rather than building from a repository """ # !! 
TODO return def constraint_options(self, constraint_dict, options): """ Return result of constraints and options against a template """ constraints = {} template = Template(template=self.manifest) for constraint in constraint_dict: if constraint != 'self': if constraint_dict[constraint] or constraint_dict[constraint] == '': constraints[constraint] = constraint_dict[constraint] results = template.constrained_sections(constraints=constraints, options=options) return results, template def tools(self): """ Return list of tuples of all tools """ tools = [] template = Template(template=self.manifest) exists, sections = template.sections() if exists: for section in sections: options = {'section':section, 'enabled':None, 'built':None, 'version':None, 'repo':None, 'branch':None, 'name':None, 'groups':None, 'image_name':None} for option in options.keys(): exists, value = template.option(section, option) if exists: options[option] = value tools.append(options) return tools def remove(self, name=None, repo=None, namespace=None, branch="master", groups=None, enabled="yes", version="HEAD", built="yes"): """ Remove tool (name) or repository, repository is the url. If no arguments are specified, all tools will be removed for the defaults. 
""" # initialize args = locals() status = (True, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: response, image_name = template.option(result, 'image_name') # check for container and remove container_name = image_name.replace(':', '-').replace('/', '-') try: container = self.d_client.containers.get(container_name) response = container.remove(v=True, force=True) self.logger.info(response) self.logger.info("Removing plugin container: "+container_name) except Exception as e: # pragma: no cover self.logger.warn("Unable to remove the plugin container: " + container_name + " because: " + str(e)) # check for image and remove try: response = self.d_client.images.remove(image_name) self.logger.info(response) self.logger.info("Removing plugin image: "+image_name) except Exception as e: # pragma: no cover self.logger.warn("Unable to remove the plugin image: " + image_name + " because: " + str(e)) # remove tool from the manifest status = template.del_section(result) self.logger.info("Removing plugin tool: "+result) # TODO if all tools from a repo have been removed, remove the repo template.write_config() return status def update(self, name=None, repo=None, namespace=None, branch=None, groups=None): """ Update tool (name) or repository, repository is the url. 
If no arguments are specified, all tools will be updated """ # initialize args = locals() status = (False, None) options = ['branch', 'groups', 'image_name'] # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, options) for result in results: # check for container and remove try: container_name = results['image_name'].replace(':', '-') \ .replace('/', '-') container = self.d_client.containers.get(container_name) response = container.remove(v=True, force=True) except Exception as e: # pragma: no cover pass # TODO git pull # TODO build # TODO docker pull # TODO update tool in the manifest self.logger.info("Updating plugin tool: "+result) template.write_config() return status # !! TODO name or group ? def versions(self, name, namespace=None, branch="master"): """ Return available versions of a tool """ # initialize args = locals() versions = [] options = ['version', 'previous_versions'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: version_list = [results[result]['version']] if 'previous_versions' in results[result]: version_list = version_list+(results[result]['previous_versions']).split(',') versions.append((result, version_list)) return versions # !! TODO name or group ? def current_version(self, name, namespace=None, branch="master"): """ Return current version for a given tool """ # initialize args = locals() versions = [] options = ['version'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: versions.append((result, results[result]['version'])) return versions # !! TODO name or group ? 
def state(self, name, namespace=None, branch="master"): """ Return state of a tool, disabled/enabled for each version """ # initialize args = locals() states = [] options = ['enabled'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: if results[result]['enabled'] == 'yes': states.append((result, 'enabled')) else: states.append((result, 'disabled')) return states # !! TODO name or group ? def enable(self, name, namespace=None, branch="master", version="HEAD"): """ Enable tool at a specific version, default to head """ # initialize args = locals() status = (False, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: status = template.set_option(result, 'enabled', 'yes') template.write_config() return status # !! TODO name or group ? def disable(self, name, namespace=None, branch="master", version="HEAD"): """ Disable tool at a specific version, default to head """ # initialize args = locals() status = (False, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: status = template.set_option(result, 'enabled', 'no') template.write_config() return status
class PluginHelper:
    """ Handle helper functions for the Plugin class """

    def __init__(self, **kargs):
        # docker client for container/image operations
        self.d_client = docker.from_env()
        # path bookkeeping (plugins dir, meta dir, vent.cfg location, etc.)
        self.path_dirs = PathDirs(**kargs)
        # manifest tracking installed plugin tools
        self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg')
        self.logger = Logger(__name__)

    def constraint_options(self, constraint_dict, options):
        """
        Return result of constraints and options against a template.

        Every non-'self' entry of constraint_dict (empty string counts as a
        valid constraint) becomes a constraint on the manifest sections.
        Returns (results dict keyed by section, the Template instance).
        """
        constraints = {}
        template = Template(template=self.manifest)
        for constraint in constraint_dict:
            if constraint != 'self':
                if (constraint_dict[constraint] or constraint_dict[constraint] == ''):
                    constraints[constraint] = constraint_dict[constraint]
        results = template.constrained_sections(constraints=constraints, options=options)
        return results, template

    def get_path(self, repo, core=False):
        """ Return the path for the repo as (path, org, name) """
        # strip a trailing .git for consistency
        if repo.endswith('.git'):
            repo = repo.split('.git')[0]
        org, name = repo.split('/')[-2:]
        path = self.path_dirs.plugins_dir
        path = join(path, org, name)
        return path, org, name

    def apply_path(self, repo):
        """
        Set cwd to where the repo is (creating the directory if needed) and
        return (True, original_cwd, repo_path) on success or (False, error).
        """
        self.logger.info('Starting: apply_path')
        self.logger.info('repo given: ' + str(repo))
        try:
            # rewrite repo for consistency
            if repo.endswith('.git'):
                repo = repo.split('.git')[0]
            # get org and repo name and path repo will be cloned to
            org, name = repo.split('/')[-2:]
            path = join(self.path_dirs.plugins_dir, org, name)
            self.logger.info('cloning to path: ' + str(path))
            # save current path
            cwd = getcwd()
            # set to new repo path
            self.path_dirs.ensure_dir(path)
            chdir(path)
            status = (True, cwd, path)
        except Exception as e:  # pragma: no cover
            self.logger.error('apply_path failed with error: ' + str(e))
            status = (False, str(e))
        self.logger.info('Status of apply_path: ' + str(status))
        self.logger.info('Finished: apply_path')
        return status

    def checkout(self, branch='master', version='HEAD'):
        """
        Checkout a specific version and branch of a repo.

        Assumes cwd is the repo; returns (True, git output) or (False, error).
        """
        self.logger.info('Starting: checkout')
        self.logger.info('branch given: ' + str(branch))
        self.logger.info('version given: ' + str(version))
        try:
            status = check_output(shlex.split('git checkout ' + branch), stderr=STDOUT, close_fds=True).decode('utf-8')
            status = check_output(shlex.split('git pull'), stderr=STDOUT, close_fds=True).decode('utf-8')
            status = check_output(shlex.split('git reset --hard ' + version), stderr=STDOUT, close_fds=True).decode('utf-8')
            response = (True, status)
        except Exception as e:  # pragma: no cover
            self.logger.error('checkout failed with error: ' + str(e))
            response = (False, str(e))
        self.logger.info('Status of checkout: ' + str(response))
        self.logger.info('Finished: checkout')
        return response

    def clone(self, repo, user=None, pw=None):
        """
        Clone the repository (or reuse an existing clone); optional user/pw
        for private https repos. Returns (True, original_cwd) or (False, err).
        """
        self.logger.info('Starting: clone')
        self.logger.info('repo given: ' + str(repo))
        self.logger.info('user given: ' + str(user))
        status = (True, None)
        try:
            status = self.apply_path(repo)
            # if path already exists, try git checkout to update
            if status[0]:
                self.logger.info('path to clone to: ' + str(status[2]))
                try:
                    # rev-parse succeeds only if the path is already a git repo
                    check_output(shlex.split('git -C ' + status[2] + ' rev-parse'), stderr=STDOUT, close_fds=True).decode('utf-8')
                    self.logger.info('path already exists: ' + str(status[2]))
                    self.logger.info('Status of clone: ' + str(status[0]))
                    self.logger.info('Finished: clone')
                    chdir(status[1])
                    return (True, status[1])
                except Exception as e:  # pragma: no cover
                    self.logger.info("repo doesn't exist, attempting to " + 'clone: ' + str(e))
            else:
                self.logger.error('unable to clone')
                return status
            # ensure cloning still works even if ssl is broken
            # NOTE(review): this disables TLS verification globally for git
            cmd = 'git config --global http.sslVerify false'
            check_output(shlex.split(cmd), stderr=STDOUT, close_fds=True).decode('utf-8')
            # check if user and pw were supplied, typically for private repos
            if user and pw:
                # only https is supported when using user/pw
                auth_repo = 'https://' + user + ':' + pw + '@'
                repo = auth_repo + repo.split('https://')[-1]
            # clone repo and build tools
            check_output(shlex.split('git clone --recursive ' + repo + ' .'), stderr=STDOUT, close_fds=True).decode('utf-8')
            chdir(status[1])
            status = (True, status[1])
        except Exception as e:  # pragma: no cover
            e_str = str(e)
            # scrub username and password from error message
            if e_str.find('@') >= 0:
                e_str = e_str[:e_str.find('//') + 2] + e_str[e_str.find('@') + 1:]
            self.logger.error('clone failed with error: ' + e_str)
            status = (False, e_str)
        self.logger.info('Status of clone: ' + str(status))
        self.logger.info('Finished: clone')
        return status

    def available_tools(self, path, version='HEAD', groups=None):
        """
        Return list of possible tools in repo for the given version and
        branch as (relative_path[@identifier], version) tuples; groups is an
        optional comma-separated filter matched against each vent.template.
        """
        matches = []
        if groups:
            groups = groups.split(',')
        for root, _, filenames in walk(path):
            files = fnmatch.filter(filenames, 'Dockerfile*')
            # append additional identifiers to tools if multiple in same
            # directory
            add_info = len(files) > 1
            for f in files:
                # !! TODO deal with wild/etc.?
                addtl_info = ''
                if add_info:
                    # @ will be delimiter symbol for multi-tools
                    try:
                        addtl_info = '@' + f.split('.')[1]
                    except Exception as e:
                        addtl_info = '@unspecified'
                if groups:
                    # multi-tools carry their own <name>.template file
                    if add_info and not addtl_info == '@unspecified':
                        tool_template = addtl_info.split('@')[1] + '.template'
                    else:
                        tool_template = 'vent.template'
                    try:
                        template = Template(template=join(root, tool_template))
                        for group in groups:
                            template_groups = template.option('info', 'groups')
                            if (template_groups[0] and group in template_groups[1]):
                                matches.append((root.split(path)[1] + addtl_info, version))
                    except Exception as e:  # pragma: no cover
                        self.logger.info('error: ' + str(e))
                else:
                    matches.append((root.split(path)[1] + addtl_info, version))
        return matches

    @staticmethod
    def tool_matches(tools=None, version='HEAD'):
        """
        Get the tools paths and versions that were specified.

        tools is a list of (path, version) pairs; paths are normalized to a
        leading '/' with no trailing '/', '.' maps to '' (whole repo).
        """
        matches = []
        if tools:
            for tool in tools:
                match_version = version
                if tool[1] != '':
                    match_version = tool[1]
                match = ''
                if tool[0].endswith('/'):
                    match = tool[0][:-1]
                elif tool[0] != '.':
                    match = tool[0]
                if not match.startswith('/') and match != '':
                    match = '/' + match
                matches.append((match, match_version))
        return matches

    def start_sections(self, s, files, groups, enabled, branch, version):
        """
        Run through sections for prep_start.

        Builds tool_d: a dict mapping container name -> kwargs eventually
        passed to docker containers.run(). Returns (status, tool_d).
        """
        tool_d = {}
        status = (True, None)
        for section in s:
            # initialize needed vars
            c_name = s[section]['image_name'].replace(':', '-')
            c_name = c_name.replace('/', '-')
            # a trailing number on the tool name marks an instance suffix
            instance_num = re.search(r'\d+$', s[section]['name'])
            if instance_num:
                c_name += instance_num.group()
            image_name = s[section]['image_name']
            # checkout the right version and branch of the repo
            cwd = getcwd()
            self.logger.info('current directory is: ' + str(cwd))
            # images built from registry won't have path
            if s[section]['path'] != '':
                chdir(join(s[section]['path']))
                # TODO commenting out for now, should use update_repo
                # status = self.checkout(branch=branch, version=version)
                status = (True, None)
                self.logger.info(status)
                chdir(cwd)
            tool_d[c_name] = {'image': image_name, 'name': c_name}
            # get rid of all commented sections in various runtime
            # configurations
            manifest = Template(self.manifest)
            overall_dict = {}
            for setting in ['info', 'docker', 'gpu', 'settings', 'service']:
                option = manifest.option(section, setting)
                if option[0]:
                    overall_dict[setting] = {}
                    settings_dict = json.loads(option[1])
                    for opt in settings_dict:
                        if not opt.startswith('#'):
                            overall_dict[setting][opt] = settings_dict[opt]
            if 'docker' in overall_dict:
                options_dict = overall_dict['docker']
                for option in options_dict:
                    options = options_dict[option]
                    # check for commands to evaluate (backtick-delimited)
                    if '`' in options:
                        cmds = options.split('`')
                        if len(cmds) > 2:
                            # odd indices are the commands between backticks
                            i = 1
                            while i < len(cmds):
                                try:
                                    cmds[i] = check_output(shlex.split(cmds[i]), stderr=STDOUT, close_fds=True).strip().decode('utf-8')
                                except Exception as e:  # pragma: no cover
                                    self.logger.error('unable to evaluate command specified in vent.template: ' + str(e))
                                i += 2
                        options = ''.join(cmds)
                    # store options set for docker
                    try:
                        # parse things like lists/dicts/bools out of the string
                        tool_d[c_name][option] = literal_eval(options)
                    except Exception as e:  # pragma: no cover
                        self.logger.info('unable to literal_eval: ' + str(options))
                        tool_d[c_name][option] = options
            if 'labels' not in tool_d[c_name]:
                tool_d[c_name]['labels'] = {}
            # get the service uri info
            if 'service' in overall_dict:
                try:
                    options_dict = overall_dict['service']
                    for option in options_dict:
                        tool_d[c_name]['labels'][option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store service options for ' 'docker: ' + str(e))
            # check for gpu settings
            if 'gpu' in overall_dict:
                try:
                    # NOTE(review): status[1] is None here, so json.loads
                    # raises and this branch never stores gpu labels —
                    # presumably overall_dict['gpu'] was intended; confirm
                    options_dict = json.loads(status[1])
                    for option in options_dict:
                        tool_d[c_name]['labels']['gpu.' + option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store gpu options for ' 'docker: ' + str(e))
            # get temporary name for links, etc.
            plugin_c = Template(template=self.manifest)
            status, plugin_sections = plugin_c.sections()
            self.logger.info(status)
            for plugin_section in plugin_sections:
                status = plugin_c.option(plugin_section, 'link_name')
                self.logger.info(status)
                image_status = plugin_c.option(plugin_section, 'image_name')
                self.logger.info(image_status)
                if status[0] and image_status[0]:
                    cont_name = image_status[1].replace(':', '-')
                    cont_name = cont_name.replace('/', '-')
                    if cont_name not in tool_d:
                        # placeholder entry so links can resolve; not started
                        tool_d[cont_name] = {'image': image_status[1], 'name': cont_name, 'start': False}
                    tool_d[cont_name]['tmp_name'] = status[1]
            # add extra labels
            tool_d[c_name]['labels']['vent'] = Version()
            tool_d[c_name]['labels']['vent.namespace'] = s[section]['namespace']
            tool_d[c_name]['labels']['vent.branch'] = branch
            tool_d[c_name]['labels']['vent.version'] = version
            tool_d[c_name]['labels']['vent.name'] = s[section]['name']
            tool_d[c_name]['labels']['vent.section'] = section
            tool_d[c_name]['labels']['vent.repo'] = s[section]['repo']
            tool_d[c_name]['labels']['vent.type'] = s[section]['type']
            # check for log_config settings in external-services
            externally_configured = False
            vent_config = Template(self.path_dirs.cfg_file)
            for ext_tool in vent_config.section('external-services')[1]:
                if ext_tool[0].lower() == 'syslog':
                    try:
                        log_dict = json.loads(ext_tool[1])
                        # configure if not locally active
                        if ('locally_active' not in log_dict or log_dict['locally_active'] == 'no'):
                            del log_dict['locally_active']
                            log_config = {}
                            log_config['type'] = 'syslog'
                            log_config['config'] = {}
                            ip_address = ''
                            port = ''
                            for option in log_dict:
                                if option == 'ip_address':
                                    ip_address = log_dict[option]
                                elif option == 'port':
                                    port = log_dict['port']
                            syslog_address = 'tcp://' + ip_address + ':' + port
                            syslog_config = {'syslog-address': syslog_address, 'syslog-facility': 'daemon', 'tag': '{{.Name}}'}
                            log_config['config'].update(syslog_config)
                            externally_configured = True
                    except Exception as e:  # pragma: no cover
                        self.logger.error('external settings for log_config' " couldn't be stored because: " + str(e))
                        externally_configured = False
            if not externally_configured:
                # default: local syslog endpoint
                log_config = {'type': 'syslog', 'config': {'syslog-address': 'tcp://0.0.0.0:514', 'syslog-facility': 'daemon', 'tag': '{{.Name}}'}}
            if 'groups' in s[section]:
                # add labels for groups
                tool_d[c_name]['labels']['vent.groups'] = s[section]['groups']
                # add restart=always to core containers
                if 'core' in s[section]['groups']:
                    tool_d[c_name]['restart_policy'] = {'Name': 'always'}
                # map network names to environment variables
                if 'network' in s[section]['groups']:
                    vent_config = Template(template=self.path_dirs.cfg_file)
                    nic_mappings = vent_config.section('network-mapping')
                    nics = ''
                    if nic_mappings[0]:
                        for nic in nic_mappings[1]:
                            nics += nic[0] + ':' + nic[1] + ','
                        nics = nics[:-1]
                    if nics:
                        if 'environment' in tool_d[c_name]:
                            tool_d[c_name]['environment'].append('VENT_NICS=' + nics)
                        else:
                            tool_d[c_name]['environment'] = ['VENT_NICS=' + nics]
                # send logs to syslog
                if ('syslog' not in s[section]['groups'] and 'core' in s[section]['groups']):
                    log_config['config']['tag'] = '{{.Name}}'
                    tool_d[c_name]['log_config'] = log_config
                if 'syslog' not in s[section]['groups']:
                    tool_d[c_name]['log_config'] = log_config
                # mount necessary directories
                if 'files' in s[section]['groups']:
                    ulimits = []
                    ulimits.append(docker.types.Ulimit(name='nofile', soft=1048576, hard=1048576))
                    tool_d[c_name]['ulimits'] = ulimits
                    # check if running in a docker container
                    if 'VENT_CONTAINERIZED' in environ and environ['VENT_CONTAINERIZED'] == 'true':
                        # share vent's own volumes with the tool
                        if 'volumes_from' in tool_d[c_name]:
                            tool_d[c_name]['volumes_from'].append(environ['HOSTNAME'])
                        else:
                            tool_d[c_name]['volumes_from'] = [environ['HOSTNAME']]
                    else:
                        # bind-mount vent's base dir read-only at /vent
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][self.path_dirs.base_dir[:-1]] = {'bind': '/vent', 'mode': 'ro'}
                        else:
                            tool_d[c_name]['volumes'] = {self.path_dirs.base_dir[:-1]: {'bind': '/vent', 'mode': 'ro'}}
                    if files[0]:
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][files[1]] = {'bind': '/files', 'mode': 'rw'}
                        else:
                            tool_d[c_name]['volumes'] = {files[1]: {'bind': '/files', 'mode': 'rw'}}
            else:
                tool_d[c_name]['log_config'] = log_config
            # add label for priority
            if 'settings' in overall_dict:
                try:
                    options_dict = overall_dict['settings']
                    for option in options_dict:
                        if option == 'priority':
                            tool_d[c_name]['labels']['vent.priority'] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store settings options ' 'for docker ' + str(e))
            # only start tools that have been built
            if s[section]['built'] != 'yes':
                del tool_d[c_name]
            # store section information for adding info to manifest later
            else:
                tool_d[c_name]['section'] = section
        return status, tool_d

    def prep_start(self, repo=None, name=None, groups=None, enabled='yes', branch='master', version='HEAD'):
        """
        Start a set of tools that match the parameters given, if no
        parameters are given, start all installed tools on the master branch
        at version HEAD that are enabled.

        Returns (bool, tool_d) where tool_d maps container name to the
        kwargs to pass to docker containers.run().
        """
        args = locals()
        self.logger.info('Starting: prep_start')
        self.logger.info('Arguments: ' + str(args))
        status = (False, None)
        try:
            options = ['name', 'namespace', 'built', 'groups', 'path', 'image_name', 'branch', 'repo', 'type', 'version']
            vent_config = Template(template=self.path_dirs.cfg_file)
            manifest = Template(self.manifest)
            files = vent_config.option('main', 'files')
            files = (files[0], expanduser(files[1]))
            s, _ = self.constraint_options(args, options)
            status, tool_d = self.start_sections(s, files, groups, enabled, branch, version)
            # look out for links to delete because they're defined externally
            links_to_delete = set()
            # get instances for each tool
            tool_instances = {}
            sections = manifest.sections()[1]
            for section in sections:
                settings = manifest.option(section, 'settings')
                if settings[0]:
                    settings = json.loads(settings[1])
                    if 'instances' in settings:
                        l_name = manifest.option(section, 'link_name')
                        if l_name[0]:
                            tool_instances[l_name[1]] = int(settings['instances'])
            # check and update links, volumes_from, network_mode
            for container in list(tool_d.keys()):
                # non-core containers get auto-removed when they exit
                if 'labels' not in tool_d[container] or 'vent.groups' not in tool_d[container]['labels'] or 'core' not in tool_d[container]['labels']['vent.groups']:
                    tool_d[container]['remove'] = True
                if 'links' in tool_d[container]:
                    for link in list(tool_d[container]['links'].keys()):
                        # add links to external services already running if
                        # necessary, by default configure local services too
                        configure_local = True
                        ext = 'external-services'
                        if link in vent_config.options(ext)[1]:
                            try:
                                lconf = json.loads(vent_config.option(ext, link)[1])
                                if ('locally_active' not in lconf or lconf['locally_active'] == 'no'):
                                    ip_adr = lconf['ip_address']
                                    port = lconf['port']
                                    tool_d[container]['extra_hosts'] = {}
                                    # containers use lowercase names for
                                    # connections
                                    tool_d[container]['extra_hosts'][link.lower()] = ip_adr
                                    # create an environment variable for
                                    # container to access port later
                                    env_variable = link.upper() + '_CUSTOM_PORT=' + port
                                    if 'environment' not in tool_d[container]:
                                        tool_d[container]['environment'] = []
                                    tool_d[container]['environment'].append(env_variable)
                                    # remove the entry from links because no
                                    # longer connecting to local container
                                    links_to_delete.add(link)
                                    configure_local = False
                            except Exception as e:  # pragma: no cover
                                self.logger.error("couldn't load external" ' settings because: ' + str(e))
                                configure_local = True
                                # NOTE(review): status is reassigned to the
                                # bool False here while elsewhere it is a
                                # (bool, value) tuple — the final
                                # if status: below relies on this; confirm
                                status = False
                        if configure_local:
                            # resolve the link's tmp_name to the actual
                            # container name (plus instance suffixes)
                            for c in list(tool_d.keys()):
                                if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == link):
                                    tool_d[container]['links'][tool_d[c]['name']] = tool_d[container]['links'].pop(link)
                                    if link in tool_instances and tool_instances[link] > 1:
                                        for i in range(2, tool_instances[link] + 1):
                                            tool_d[container]['links'][tool_d[c]['name'] + str(i)] = tool_d[container]['links'][tool_d[c]['name']] + str(i)
                if 'volumes_from' in tool_d[container]:
                    # translate tmp_names in volumes_from to real names
                    tmp_volumes_from = tool_d[container]['volumes_from']
                    tool_d[container]['volumes_from'] = []
                    for volumes_from in list(tmp_volumes_from):
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == volumes_from):
                                tool_d[container]['volumes_from'].append(tool_d[c]['name'])
                                tmp_volumes_from.remove(volumes_from)
                    tool_d[container]['volumes_from'] += tmp_volumes_from
                if 'network_mode' in tool_d[container]:
                    # translate container:<tmp_name> network modes too
                    if tool_d[container]['network_mode'].startswith('container:'):
                        network_c_name = tool_d[container]['network_mode'].split('container:')[1]
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == network_c_name):
                                tool_d[container]['network_mode'] = 'container:' + tool_d[c]['name']
            # remove tmp_names
            for c in list(tool_d.keys()):
                if 'tmp_name' in tool_d[c]:
                    del tool_d[c]['tmp_name']
            # remove links section if all were externally configured
            for c in list(tool_d.keys()):
                if 'links' in tool_d[c]:
                    for link in links_to_delete:
                        if link in tool_d[c]['links']:
                            del tool_d[c]['links'][link]
                    # delete links if no more defined
                    if not tool_d[c]['links']:
                        del tool_d[c]['links']
            # remove containers that shouldn't be started
            for c in list(tool_d.keys()):
                deleted = False
                if 'start' in tool_d[c] and not tool_d[c]['start']:
                    del tool_d[c]
                    deleted = True
                if not deleted:
                    # look for tools services that are being done externally
                    # tools are capitalized in vent.cfg, so make them lowercase
                    # for comparison
                    ext = 'external-services'
                    external_tools = vent_config.section(ext)[1]
                    name = tool_d[c]['labels']['vent.name']
                    for tool in external_tools:
                        if name == tool[0].lower():
                            try:
                                tool_config = json.loads(tool[1])
                                if ('locally_active' in tool_config and tool_config['locally_active'] == 'no'):
                                    del tool_d[c]
                            except Exception as e:  # pragma: no cover
                                self.logger.warning('Locally running container ' + name + ' may be redundant')
            if status:
                status = (True, tool_d)
            else:
                status = (False, tool_d)
        except Exception as e:  # pragma: no cover
            self.logger.error('prep_start failed with error: ' + str(e))
            status = (False, e)
        self.logger.info('Status of prep_start: ' + str(status[0]))
        self.logger.info('Finished: prep_start')
        return status

    def start_priority_containers(self, groups, group_orders, tool_d):
        """
        Select containers based on priorities to start.

        Honors the optional groups/start_order list in vent.cfg first, then
        any remaining prioritized groups. Returns (started, failed) lists.
        """
        vent_cfg = Template(self.path_dirs.cfg_file)
        cfg_groups = vent_cfg.option('groups', 'start_order')
        if cfg_groups[0]:
            cfg_groups = cfg_groups[1].split(',')
        else:
            cfg_groups = []
        all_groups = sorted(set(groups))
        s_conts = []
        f_conts = []
        # start tools in order of group defined in vent.cfg
        for group in cfg_groups:
            # remove from all_groups because already checked out
            if group in all_groups:
                all_groups.remove(group)
            if group in group_orders:
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self.start_containers(cont_t[1], tool_d, s_conts, f_conts)
        # start tools that haven't been specified in the vent.cfg, if any
        for group in all_groups:
            if group in group_orders:
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self.start_containers(cont_t[1], tool_d, s_conts, f_conts)
        return (s_conts, f_conts)

    def start_remaining_containers(self, containers_remaining, tool_d):
        """
        Select remaining containers that didn't have priorities to start.
        Returns (started, failed) lists.
        """
        s_containers = []
        f_containers = []
        for container in containers_remaining:
            s_containers, f_containers = self.start_containers(container, tool_d, s_containers, f_containers)
        return (s_containers, f_containers)

    def start_containers(self, container, tool_d, s_containers, f_containers):
        """
        Start container that was passed in and return updated
        (s_containers, f_containers) status lists; also records the
        running state in the manifest.
        """
        # use section to add info to manifest
        section = tool_d[container]['section']
        del tool_d[container]['section']
        manifest = Template(self.manifest)
        try:
            # fast path: container already exists, just start it
            c = self.d_client.containers.get(container)
            c.start()
            s_containers.append(container)
            manifest.set_option(section, 'running', 'yes')
            self.logger.info('started ' + str(container) + ' with ID: ' + str(c.short_id))
        except Exception as err:
            try:
                gpu = 'gpu.enabled'
                failed = False
                if (gpu in tool_d[container]['labels'] and tool_d[container]['labels'][gpu] == 'yes'):
                    # ask nvidia-docker-plugin for the volume/device options
                    # the container needs
                    vent_config = Template(template=self.path_dirs.cfg_file)
                    port = ''
                    host = ''
                    result = vent_config.option('nvidia-docker-plugin', 'port')
                    if result[0]:
                        port = result[1]
                    else:
                        port = '3476'
                    result = vent_config.option('nvidia-docker-plugin', 'host')
                    if result[0]:
                        host = result[1]
                    else:
                        # now just requires ip, ifconfig
                        try:
                            route = check_output(('ip', 'route')).decode('utf-8').split('\n')
                            default = ''
                            # grab the default network device.
                            for device in route:
                                if 'default' in device:
                                    default = device.split()[4]
                                    break
                            # grab the IP address for the default device
                            ip_addr = check_output(('ifconfig', default)).decode('utf-8')
                            ip_addr = ip_addr.split('\n')[1].split()[1]
                            host = ip_addr
                        except Exception as e:  # pragma no cover
                            self.logger.error('failed to grab ip. Ensure that \
ip and ifconfig are installed')
                    nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                    params = {'vol': 'nvidia_driver'}
                    r = requests.get(nd_url, params=params)
                    if r.status_code == 200:
                        options = r.text.split()
                        for option in options:
                            if option.startswith('--volume-driver='):
                                tool_d[container]['volume_driver'] = option.split('=', 1)[1]
                            elif option.startswith('--volume='):
                                vol = option.split('=', 1)[1].split(':')
                                if 'volumes' in tool_d[container]:
                                    if isinstance(tool_d[container]['volumes'], list):
                                        if len(vol) == 2:
                                            c_vol = vol[0] + ':' + vol[1] + ':rw'
                                        else:
                                            c_vol = vol[0] + ':' + vol[1] + ':' + vol[2]
                                        tool_d[container]['volumes'].append(c_vol)
                                    else:  # Dictionary
                                        tool_d[container]['volumes'][vol[0]] = {'bind': vol[1], 'mode': vol[2]}
                                else:
                                    tool_d[container]['volumes'] = {vol[0]: {'bind': vol[1], 'mode': vol[2]}}
                            elif option.startswith('--device='):
                                dev = option.split('=', 1)[1]
                                if 'devices' in tool_d[container]:
                                    tool_d[container]['devices'].append(dev + ':' + dev + ':rwm')
                                else:
                                    tool_d[container]['devices'] = [dev + ':' + dev + ':rwm']
                            else:
                                self.logger.error('Unable to parse ' + 'nvidia-docker option: ' + str(option))
                    else:
                        failed = True
                        f_containers.append(container)
                        manifest.set_option(section, 'running', 'failed')
                        self.logger.error('failed to start ' + str(container) + ' because nvidia-docker-plugin ' + 'failed with: ' + str(r.status_code))
                if not failed:
                    try:
                        # NOTE(review): docker-py's ContainerCollection
                        # exposes get(...).remove(...) rather than a
                        # collection-level remove; any failure here is
                        # intentionally ignored — confirm against the
                        # docker SDK version in use
                        self.d_client.containers.remove(container, force=True)
                        self.logger.info('removed old existing container: ' + str(container))
                    except Exception as e:
                        pass
                    cont_id = self.d_client.containers.run(detach=True, **tool_d[container])
                    s_containers.append(container)
                    manifest.set_option(section, 'running', 'yes')
                    self.logger.info('started ' + str(container) + ' with ID: ' + str(cont_id))
            except Exception as e:  # pragma: no cover
                f_containers.append(container)
                manifest.set_option(section, 'running', 'failed')
                self.logger.error('failed to start ' + str(container) + ' because: ' + str(e))
        # save changes made to manifest
        manifest.write_config()
        return s_containers, f_containers
class System:
    """ Operations on the vent host as a whole: install detection, backup /
    restore, factory reset, startup-file driven provisioning, and
    configuration get/save/restart for tools. """

    def __init__(self, *args, **kwargs):
        # docker client talking to the local daemon
        self.d_client = docker.from_env()
        self.path_dirs = PathDirs(**kwargs)
        # manifest tracks every installed tool and its build/run state
        self.manifest = join(self.path_dirs.meta_dir,
                             'plugin_manifest.cfg')
        self.vent_config = self.path_dirs.cfg_file
        self.startup_file = self.path_dirs.startup_file
        self.logger = Logger(__name__)
        # pick up any vent-labeled images not yet recorded in the manifest
        self._auto_install()

    def _auto_install(self):
        """ Automatically detects images and installs them in the manifest if
        they are not there already

        Scans docker images labeled 'vent' and, for each one whose
        'vent.section' label is not already a manifest section, rebuilds the
        manifest entry from the image labels. Returns a status tuple
        (bool, list-of-added-sections).
        """
        template = Template(template=self.manifest)
        sections = template.sections()
        images = self.d_client.images.list(filters={'label': 'vent'})
        add_sections = []
        status = (True, None)
        for image in images:
            ignore = False
            if ('Labels' in image.attrs['Config'] and
                    'vent.section' in image.attrs['Config']['Labels'] and
                    not image.attrs['Config']['Labels']['vent.section'] in sections[1]):
                section = image.attrs['Config']['Labels']['vent.section']
                # section format: org:repo:tool-path:branch:version
                section_str = image.attrs['Config']['Labels']['vent.section'].split(
                    ':')
                template.add_section(section)
                if 'vent.name' in image.attrs['Config']['Labels']:
                    template.set_option(section, 'name',
                                        image.attrs['Config']['Labels']['vent.name'])
                if 'vent.repo' in image.attrs['Config']['Labels']:
                    template.set_option(section, 'repo',
                                        image.attrs['Config']['Labels']['vent.repo'])
                    git_path = join(self.path_dirs.plugins_dir,
                                    '/'.join(section_str[:2]))
                    # TODO clone it down
                    # section_str[-3][1:] strips the leading '/' from the
                    # tool's path component
                    template.set_option(section, 'path', join(
                        git_path, section_str[-3][1:]))
                    # get template settings
                    # TODO account for template files not named vent.template
                    v_template = Template(template=join(
                        git_path, section_str[-3][1:], 'vent.template'))
                    tool_sections = v_template.sections()
                    if tool_sections[0]:
                        # copy each vent.template section into the manifest
                        # entry as a JSON-encoded dict
                        for s in tool_sections[1]:
                            section_dict = {}
                            options = v_template.options(s)
                            if options[0]:
                                for option in options[1]:
                                    option_name = option
                                    if option == 'name':
                                        # get link name
                                        template.set_option(section,
                                                            'link_name',
                                                            v_template.option(s, option)[1])
                                        option_name = 'link_name'
                                    opt_val = v_template.option(s, option)[1]
                                    section_dict[option_name] = opt_val
                            if section_dict:
                                template.set_option(section, s,
                                                    json.dumps(section_dict))
                if ('vent.type' in image.attrs['Config']['Labels'] and
                        image.attrs['Config']['Labels']['vent.type'] == 'repository'):
                    template.set_option(
                        section, 'namespace', '/'.join(section_str[:2]))
                    template.set_option(section, 'branch', section_str[-2])
                    template.set_option(section, 'version', section_str[-1])
                    template.set_option(section, 'last_updated', str(
                        datetime.utcnow()) + ' UTC')
                    if image.attrs['RepoTags']:
                        template.set_option(
                            section, 'image_name', image.attrs['RepoTags'][0])
                    else:
                        # image with none tag is outdated, don't add it
                        ignore = True
                    template.set_option(section, 'type', 'repository')
                if 'vent.groups' in image.attrs['Config']['Labels']:
                    template.set_option(section, 'groups',
                                        image.attrs['Config']['Labels']['vent.groups'])
                template.set_option(section, 'built', 'yes')
                template.set_option(section, 'image_id',
                                    image.attrs['Id'].split(':')[1][:12])
                template.set_option(section, 'running', 'no')
                # check if image is running as a container
                containers = self.d_client.containers.list(
                    filters={'label': 'vent'})
                for container in containers:
                    if container.attrs['Image'] == image.attrs['Id']:
                        template.set_option(section, 'running', 'yes')
                if not ignore:
                    add_sections.append(section)
                template.write_config()
        # TODO this check will always be true, need to actually validate the above logic
        if status[0]:
            status = (True, add_sections)
        return status

    def backup(self):
        """ Saves the configuration information of the current running vent
        instance to be used for restoring at a later time

        Copies the plugin manifest and vent.cfg into a new timestamped
        directory under the user's home. Returns (True, backup_dir) on
        success, (False, error-string) on failure.
        """
        status = (True, None)
        # initialize all needed variables (names for backup files, etc.)
        backup_name = ('.vent-backup-' + '-'.join(Timestamp().split(' ')))
        backup_dir = join(expanduser('~'), backup_name)
        backup_manifest = join(backup_dir, 'backup_manifest.cfg')
        backup_vcfg = join(backup_dir, 'backup_vcfg.cfg')
        manifest = self.manifest
        # create new backup directory
        try:
            mkdir(backup_dir)
        except Exception as e:  # pragma: no cover
            self.logger.error(str(e))
            return (False, str(e))
        # create new files in backup directory
        try:
            # backup manifest
            with open(backup_manifest, 'w') as bmanifest:
                with open(manifest, 'r') as manifest_file:
                    bmanifest.write(manifest_file.read())
            # backup vent.cfg
            with open(backup_vcfg, 'w') as bvcfg:
                with open(self.vent_config, 'r') as vcfg_file:
                    bvcfg.write(vcfg_file.read())
            self.logger.info('Backup information written to ' + backup_dir)
            status = (True, backup_dir)
        except Exception as e:  # pragma: no cover
            self.logger.error("Couldn't backup vent: " + str(e))
            status = (False, str(e))
        # TODO #266
        return status

    def configure(self):
        # TODO
        return

    def gpu(self):
        # TODO
        return

    def history(self):
        # TODO #255
        return

    def restore(self, backup_dir):
        """ Restores a vent configuration from a previously backed up
        version """
        # TODO #266
        status = (True, None)
        return status

    def reset(self):
        """ Factory reset all of Vent's user data, containers, and images

        Best-effort: each stage (containers, images, ~/.vent data) is
        attempted independently and its failure appended to an accumulated
        error message rather than aborting. Returns (True, None) or
        (False, combined-error-string).
        """
        status = (True, None)
        error_message = ''
        # remove containers
        try:
            c_list = set(self.d_client.containers.list(
                filters={'label': 'vent'}, all=True))
            for c in c_list:
                c.remove(force=True)
        except Exception as e:  # pragma: no cover
            error_message += 'Error removing Vent containers: ' + str(e) + '\n'
        # remove images
        try:
            i_list = set(self.d_client.images.list(filters={'label': 'vent'},
                                                   all=True))
            for i in i_list:
                # delete tagged images only because they are the parents for
                # the untagged images. Remove the parents and the children get
                # removed automatically
                if i.attrs['RepoTags']:
                    self.d_client.images.remove(image=i.id, force=True)
        except Exception as e:  # pragma: no cover
            error_message += 'Error deleting Vent images: ' + str(e) + '\n'
        # remove .vent folder
        try:
            cwd = getcwd()
            # step out of the directory before deleting it
            if cwd.startswith(join(expanduser('~'), '.vent')):
                chdir(expanduser('~'))
            shutil.rmtree(join(expanduser('~'), '.vent'))
        except Exception as e:  # pragma: no cover
            error_message += 'Error deleting Vent data: ' + str(e) + '\n'
        if error_message:
            status = (False, error_message)
        return status

    def rollback(self):
        # TODO #266
        return

    def start(self):
        """ Start the vent host: from the startup file if one exists,
        otherwise install and start the core tools. """
        status = (True, None)
        # startup based on startup file
        if exists(self.startup_file):
            status = self._startup()
        else:
            tools = Tools()
            status = tools.new('core', None)
            if status[0]:
                status = tools.start(
                    'https://github.com/cyberreboot/vent', None)
        return status

    def _startup(self):
        """ Automatically detect if a startup file is specified and stand up
        a vent host with all necessary tools based on the specifications in
        that file

        The startup file is YAML keyed by repo, then tool, with optional
        'branch', 'version', 'image', 'settings', 'start' entries, plus an
        optional top-level 'vent.cfg' section of booleans. Returns a status
        tuple (bool, None-or-error-string).
        """
        status = (True, None)
        try:
            s_dict = {}
            # rewrite the yml file to exclusively lowercase
            with open(self.startup_file, 'r') as sup:
                vent_startup = sup.read()
            with open(self.startup_file, 'w') as sup:
                # NOTE(review): iterating a str yields characters, so this
                # writes the file back char-by-char; equivalent to writing
                # vent_startup.lower() — confirm intent
                for line in vent_startup:
                    sup.write(line.lower())
            with open(self.startup_file, 'r') as sup:
                s_dict = yaml.safe_load(sup.read())
            if 'vent.cfg' in s_dict:
                # apply vent.cfg overrides, mapping booleans to 'yes'/'no'
                v_cfg = Template(self.vent_config)
                for section in s_dict['vent.cfg']:
                    for option in s_dict['vent.cfg'][section]:
                        val = ('no', 'yes')[
                            s_dict['vent.cfg'][section][option]]
                        v_status = v_cfg.add_option(section, option, value=val)
                        if not v_status[0]:
                            # option already exists; overwrite it instead
                            v_cfg.set_option(section, option, val)
                v_cfg.write_config()
                del s_dict['vent.cfg']
            tool_d = {}
            extra_options = ['info', 'service', 'settings', 'docker', 'gpu']
            # deep copy so s_dict can be mutated while iterating the copy
            s_dict_c = copy.deepcopy(s_dict)
            # TODO check for repo or image type
            for repo in s_dict_c:
                repository = Repository(System().manifest)
                repository.repo = repo
                repository._clone()
                repo_path, org, r_name = self.path_dirs.get_path(repo)
                get_tools = []
                for tool in s_dict_c[repo]:
                    t_branch = 'master'
                    t_version = 'HEAD'
                    if 'branch' in s_dict[repo][tool]:
                        t_branch = s_dict[repo][tool]['branch']
                    if 'version' in s_dict[repo][tool]:
                        t_version = s_dict[repo][tool]['version']
                    get_tools.append((tool, t_branch, t_version))
                available_tools = AvailableTools(repo_path, tools=get_tools)
                for tool in s_dict_c[repo]:
                    # if we can't find the tool in that repo, skip over this
                    # tool and notify in the logs
                    t_path, t_path_cased = PathDirs.rel_path(
                        tool, available_tools)
                    if t_path is None:
                        self.logger.error("Couldn't find tool " + tool + ' in'
                                          ' repo ' + repo)
                        continue
                    # ensure no NoneType iteration errors
                    if s_dict_c[repo][tool] is None:
                        s_dict[repo][tool] = {}
                    # check if we need to configure instances along the way
                    instances = 1
                    if 'settings' in s_dict[repo][tool]:
                        if 'instances' in s_dict[repo][tool]['settings']:
                            instances = int(s_dict[repo][tool]
                                            ['settings']['instances'])
                    # add the tool
                    t_branch = 'master'
                    t_version = 'HEAD'
                    t_image = None
                    add_tools = None
                    add_tools = [(t_path_cased, '')]
                    if 'branch' in s_dict[repo][tool]:
                        t_branch = s_dict[repo][tool]['branch']
                    if 'version' in s_dict[repo][tool]:
                        t_version = s_dict[repo][tool]['version']
                    if 'image' in s_dict[repo][tool]:
                        t_image = s_dict[repo][tool]['image']
                    repository.add(
                        repo, add_tools, branch=t_branch, version=t_version,
                        image_name=t_image)
                    manifest = Template(self.manifest)
                    # update the manifest with extra defined runtime settings
                    base_section = ':'.join([org, r_name, t_path,
                                             t_branch, t_version])
                    for option in extra_options:
                        if option in s_dict[repo][tool]:
                            opt_dict = manifest.option(base_section, option)
                            # add new values defined into default options for
                            # that tool, don't overwrite them
                            if opt_dict[0]:
                                opt_dict = json.loads(opt_dict[1])
                            else:
                                opt_dict = {}
                            # stringify values for vent
                            for v in s_dict[repo][tool][option]:
                                pval = s_dict[repo][tool][option][v]
                                s_dict[repo][tool][option][v] = json.dumps(
                                    pval)
                            opt_dict.update(s_dict[repo][tool][option])
                            manifest.set_option(base_section, option,
                                                json.dumps(opt_dict))
                    # copy manifest info into new sections if necessary
                    if instances > 1:
                        for i in range(2, instances + 1):
                            # instance sections get the index appended to the
                            # tool-path component of the section name
                            i_section = base_section.rsplit(':', 2)
                            i_section[0] += str(i)
                            i_section = ':'.join(i_section)
                            manifest.add_section(i_section)
                            for opt_val in manifest.section(base_section)[1]:
                                if opt_val[0] == 'name':
                                    manifest.set_option(i_section, opt_val[0],
                                                        opt_val[1] + str(i))
                                else:
                                    manifest.set_option(i_section, opt_val[0],
                                                        opt_val[1])
                    manifest.write_config()
            tool_d = {}
            tools = Tools()
            # start tools, if necessary
            for repo in s_dict:
                for tool in s_dict[repo]:
                    if 'start' in s_dict[repo][tool]:
                        if s_dict[repo][tool]['start']:
                            local_instances = 1
                            if 'settings' in s_dict[repo][tool] and 'instances' in s_dict[repo][tool]['settings']:
                                local_instances = int(
                                    s_dict[repo][tool]['settings']['instances'])
                            t_branch = 'master'
                            t_version = 'HEAD'
                            if 'branch' in s_dict[repo][tool]:
                                t_branch = s_dict[repo][tool]['branch']
                            if 'version' in s_dict[repo][tool]:
                                t_version = s_dict[repo][tool]['version']
                            for i in range(1, local_instances + 1):
                                # first instance keeps the bare tool name
                                i_name = tool + str(i) if i != 1 else tool
                                i_name = i_name.replace('@', '')
                                tool_d.update(
                                    tools._prep_start(repo, i_name)[1])
            if tool_d:
                tools.start(tool_d, None, is_tool_d=True)
        except Exception as e:  # pragma: no cover
            self.logger.error('Startup failed because: {0}'.format(str(e)))
            status = (False, str(e))
        return status

    def stop(self):
        """ Stop the vent host by force-removing all vent-labeled
        containers. Returns a status tuple. """
        status = (True, None)
        # remove containers
        try:
            c_list = set(self.d_client.containers.list(
                filters={'label': 'vent'}, all=True))
            for c in c_list:
                c.remove(force=True)
        except Exception as e:  # pragma: no cover
            status = (False, str(e))
        return status

    def get_configure(self, repo=None, name=None, groups=None,
                      main_cfg=False):
        """ Get the vent.template settings for a given tool by looking at the
        plugin_manifest

        When main_cfg is True, returns the contents of vent.cfg instead.
        Returns (True, rendered-config-string) or (False, error-string).
        """
        # locals() captures repo/name/groups (and self) as the constraint set
        constraints = locals()
        del constraints['main_cfg']
        status = (True, None)
        template_dict = {}
        return_str = ''
        if main_cfg:
            vent_cfg = Template(self.vent_config)
            for section in vent_cfg.sections()[1]:
                template_dict[section] = {}
                for vals in vent_cfg.section(section)[1]:
                    template_dict[section][vals[0]] = vals[1]
        else:
            # all possible vent.template options stored in plugin_manifest
            options = ['info', 'service', 'settings', 'docker', 'gpu']
            tools = Template(System().manifest).constrain_opts(
                constraints, options)[0]
            if tools:
                # should only be one tool
                tool = list(tools.keys())[0]
                # load all vent.template options into dict
                for section in tools[tool]:
                    template_dict[section] = json.loads(tools[tool][section])
            else:
                status = (False, "Couldn't get vent.template information")
        if status[0]:
            # display all those options as they would in the file
            for section in template_dict:
                return_str += '[' + section + ']\n'
                # ensure instances shows up in configuration
                for option in template_dict[section]:
                    if option.startswith('#'):
                        # comment line; emit verbatim
                        return_str += option + '\n'
                    else:
                        return_str += option + ' = '
                        return_str += template_dict[section][option] + '\n'
                return_str += '\n'
            # only one newline at end of file
            status = (True, return_str[:-1])
        return status

    def save_configure(self, repo=None, name=None, groups=None,
                       config_val='', from_registry=False, main_cfg=False,
                       instances=1):
        """ Save changes made to vent.template through npyscreen to the
        template and to plugin_manifest

        config_val is the full text of the edited template (or vent.cfg when
        main_cfg is True). Returns a status tuple.
        """
        def template_to_manifest(vent_template, manifest, tool, instances):
            """ Helper function to transfer information from vent.template to
            plugin_manifest """
            sections = vent_template.sections()
            if sections[0]:
                for section in sections[1]:
                    section_dict = {}
                    if section == 'settings':
                        section_dict.update({'instances': str(instances)})
                    options = vent_template.options(section)
                    if options[0]:
                        for option in options[1]:
                            option_name = option
                            if option == 'name':
                                option_name = 'link_name'
                            opt_val = vent_template.option(section,
                                                           option)[1]
                            section_dict[option_name] = opt_val
                    if section_dict:
                        manifest.set_option(tool, section,
                                            json.dumps(section_dict))
                    elif manifest.option(tool, section)[0]:
                        # section emptied out; drop it from the manifest
                        manifest.del_option(tool, section)
        # locals() minus the non-constraint args becomes the constraint set
        constraints = locals()
        del constraints['config_val']
        del constraints['from_registry']
        del constraints['main_cfg']
        del constraints['instances']
        del constraints['template_to_manifest']
        status = (True, None)
        fd = None
        # ensure instances is an int and remove instances from config_val to
        # ensure correct info
        instances = int(instances)
        config_val = re.sub(r'instances\ *=\ *\d+\n', '', config_val)
        api_system = System()
        manifest = api_system.manifest
        if not main_cfg:
            if not from_registry:
                # creating new instances
                if instances > 1:
                    fd, template_path = tempfile.mkstemp(suffix='.template')
                    # scrub name for clean section name
                    if re.search(r'\d+$', name):
                        name = re.sub(r'\d+$', '', name)
                    # NOTE(review): 'branch' and 'version' are not defined in
                    # this scope (not parameters of save_configure) — this
                    # path looks like it raises NameError; confirm against
                    # callers
                    t_identifier = {'name': name,
                                    'branch': branch,
                                    'version': version}
                    result = Template(manifest).constrain_opts(
                        t_identifier, [])
                    tools = result[0]
                    tool = list(tools.keys())[0]
                else:
                    options = ['path', 'multi_tool', 'name']
                    tools, _ = Template(manifest).constrain_opts(
                        constraints, options)
                    # only one tool in tools because perform this function for
                    # every tool
                    if tools:
                        tool = list(tools.keys())[0]
                        if ('multi_tool' in tools[tool] and
                                tools[tool]['multi_tool'] == 'yes'):
                            name = tools[tool]['name']
                            if name == 'unspecified':
                                name = 'vent'
                            template_path = join(tools[tool]['path'],
                                                 name+'.template')
                        else:
                            template_path = join(tools[tool]['path'],
                                                 'vent.template')
                    else:
                        status = (False, "Couldn't save configuration")
            else:
                # registry tools have no local template; write to a temp file
                fd, template_path = tempfile.mkstemp(suffix='.template')
                options = ['namespace']
                constraints.update({'type': 'registry'})
                tools, _ = Template(manifest).constrain_opts(constraints,
                                                             options)
                if tools:
                    tool = list(tools.keys())[0]
                else:
                    status = (False, "Couldn't save configuration")
            if status[0]:
                try:
                    # save in vent.template
                    with open(template_path, 'w') as f:
                        f.write(config_val)
                    # save in plugin_manifest
                    vent_template = Template(template_path)
                    manifest = Template(manifest)
                    if instances > 1:
                        # add instances as needed
                        for i in range(1, instances + 1):
                            i_section = tool.rsplit(':', 2)
                            i_section[0] += str(i) if i != 1 else ''
                            i_section = ':'.join(i_section)
                            if not manifest.section(i_section)[0]:
                                # brand new instance: copy the base tool's
                                # options, adjusting per-instance fields
                                manifest.add_section(i_section)
                                for val_pair in manifest.section(tool)[1]:
                                    name = val_pair[0]
                                    val = val_pair[1]
                                    if name == 'name':
                                        val += str(i)
                                    elif name == 'last_updated':
                                        val = Timestamp()
                                    elif name == 'running':
                                        val = 'no'
                                    manifest.set_option(i_section, name, val)
                                template_to_manifest(vent_template, manifest,
                                                     i_section, instances)
                            else:
                                # existing instance: just refresh the
                                # instances count in its settings
                                settings = manifest.option(i_section,
                                                           'settings')
                                if settings[0]:
                                    settings_dict = json.loads(settings[1])
                                    settings_dict['instances'] = str(instances)
                                    manifest.set_option(i_section, 'settings',
                                                        json.dumps(
                                                            settings_dict))
                                else:
                                    inst = str(instances)
                                    settings_dict = {'instances': inst}
                                    manifest.set_option(i_section, 'settings',
                                                        json.dumps(
                                                            settings_dict))
                    else:
                        # preserve the previously-recorded instance count
                        try:
                            settings_str = manifest.option(tool,
                                                           'settings')[1]
                            settings_dict = json.loads(settings_str)
                            old_instances = int(settings_dict['instances'])
                        except Exception:
                            old_instances = 1
                        template_to_manifest(vent_template, manifest,
                                             tool, old_instances)
                    manifest.write_config()
                    status = (True, manifest)
                except Exception as e:  # pragma: no cover
                    self.logger.error('save_configure error: ' + str(e))
                    status = (False, str(e))
            # close os file handle and remove temp file
            if from_registry or instances > 1:
                try:
                    close(fd)
                    remove(template_path)
                except Exception as e:  # pragma: no cover
                    self.logger.error('save_configure error: ' + str(e))
        else:
            with open(self.vent_config, 'w') as f:
                f.write(config_val)
        return status

    def restart_tools(self, repo=None, name=None, groups=None,
                      branch='master', version='HEAD', main_cfg=False,
                      old_val='', new_val=''):
        """ Restart necessary tools based on changes that have been made
        either to vent.cfg or to vent.template. This includes tools that need
        to be restarted because they depend on other tools that were changed.

        When main_cfg is True, old_val/new_val are the before/after vent.cfg
        texts and changed [external-services] entries drive the restarts.
        Returns a status tuple.
        """
        status = (True, None)
        if not main_cfg:
            try:
                t_identifier = {'name': name,
                                'branch': branch,
                                'version': version}
                result = Template(System().manifest).constrain_opts(
                    t_identifier, ['running', 'link_name'])
                tools = result[0]
                tool = list(tools.keys())[0]
                if ('running' in tools[tool] and
                        tools[tool]['running'] == 'yes'):
                    start_tools = [t_identifier]
                    dependent_tools = [tools[tool]['link_name']]
                    start_tools += Dependencies(dependent_tools)
                    # TODO
                    start_d = {}
                    for tool_identifier in start_tools:
                        # NOTE(review): self.clean / self.prep_start are not
                        # defined on System in this file — presumably they
                        # live elsewhere or this path is unfinished; confirm
                        self.clean(**tool_identifier)
                        start_d.update(self.prep_start(**tool_identifier)[1])
                    if start_d:
                        Tools().start(start_d, '', is_tool_d=True)
            except Exception as e:  # pragma: no cover
                self.logger.error('Trouble restarting tool ' + name +
                                  ' because: ' + str(e))
                status = (False, str(e))
        else:
            try:
                # string manipulation to get tools into arrays
                ext_start = old_val.find('[external-services]')
                if ext_start >= 0:
                    # +20 skips past the '[external-services]\n' header
                    ot_str = old_val[old_val.find('[external-services]') + 20:]
                else:
                    ot_str = ''
                old_tools = []
                for old_tool in ot_str.split('\n'):
                    if old_tool != '':
                        old_tools.append(old_tool.split('=')[0].strip())
                ext_start = new_val.find('[external-services]')
                if ext_start >= 0:
                    nt_str = new_val[new_val.find('[external-services]') + 20:]
                else:
                    nt_str = ''
                new_tools = []
                for new_tool in nt_str.split('\n'):
                    if new_tool != '':
                        new_tools.append(new_tool.split('=')[0].strip())
                # find tools changed
                tool_changes = []
                for old_tool in old_tools:
                    if old_tool not in new_tools:
                        tool_changes.append(old_tool)
                for new_tool in new_tools:
                    if new_tool not in old_tools:
                        tool_changes.append(new_tool)
                    else:
                        # tool name will be the same
                        oconf = old_val[old_val.find(new_tool):].split('\n')[0]
                        nconf = new_val[new_val.find(new_tool):].split('\n')[0]
                        if oconf != nconf:
                            tool_changes.append(new_tool)
                # put link names in a dictionary for finding dependencies
                dependent_tools = []
                for i, entry in enumerate(tool_changes):
                    dependent_tools.append(entry)
                    # change names to lowercase for use in clean, prep_start
                    tool_changes[i] = {'name': entry.lower().replace('-',
                                                                     '_')}
                dependencies = Dependencies(dependent_tools)
                # restart tools
                restart = tool_changes + dependencies
                tool_d = {}
                for tool in restart:
                    self.clean(**tool)
                    tool_d.update(self.prep_start(**tool)[1])
                if tool_d:
                    # TODO fix the arguments
                    Tools().start(tool_d)
            except Exception as e:  # pragma: no cover
                self.logger.error('Problem restarting tools: ' + str(e))
                status = (False, str(e))
        return status

    def upgrade(self):
        ''' Upgrades Vent itself, and core containers '''
        # TODO
        return
class Repository:
    """ Clone a tool repository, register its tools in the plugin manifest,
    and build (or pull) their docker images. """

    def __init__(self, manifest, *args, **kwargs):
        # manifest: path to the plugin_manifest.cfg to record tools in
        self.path_dirs = PathDirs(**kwargs)
        self.manifest = manifest
        self.d_client = docker.from_env()
        self.logger = Logger(__name__)

    def add(self, repo, tools=None, overrides=None, version='HEAD',
            core=False, image_name=None, branch='master', build=True,
            user=None, pw=None):
        """ Clone the repo (optionally authenticated) and, unless build is
        False, build/pull images for the matching tools. Returns a status
        tuple. """
        status = (True, None)
        self.repo = repo.lower()
        self.tools = tools
        self.overrides = overrides
        self.branch = branch
        self.version = version
        self.image_name = image_name
        self.core = core
        status = self._clone(user=user, pw=pw)
        if status[0] and build:
            status = self._build()
        return status

    def _build(self):
        """ Resolve the tool matches for this repo, write each one into the
        manifest, and build its image. Returns a status tuple. """
        status = (True, None)
        status = self._get_tools()
        matches = status[1]
        status = self.path_dirs.apply_path(self.repo)
        # apply_path also returns the caller's original cwd to restore later
        original_path = status[1]
        if status[0] and len(matches) > 0:
            repo, org, name = self.path_dirs.get_path(self.repo)
            cmd = 'git rev-parse --short ' + self.version
            commit_id = ''
            try:
                commit_id = check_output(shlex.split(cmd), stderr=STDOUT,
                                         close_fds=True).strip().decode('utf-8')
            except Exception as e:  # pragma: no cover
                self.logger.error(
                    'Unable to get commit ID because: {0}'.format(str(e)))
            template = Template(template=self.manifest)
            for match in matches:
                status, template, match_path, image_name, section = self._build_manifest(
                    match, template, repo, org, name, commit_id)
                if not status[0]:
                    break
                status, template = self._build_image(template, match_path,
                                                     image_name, section)
                if not status[0]:
                    break
            if status[0]:
                # write out configuration to the manifest file
                template.write_config()
        chdir(original_path)
        return status

    def _get_tools(self):
        """ Determine which tools in the checked-out repo to operate on,
        honoring self.tools and self.overrides. Returns
        (bool, list-of-(tool, version)-matches). """
        status = (True, None)
        matches = []
        path, _, _ = self.path_dirs.get_path(self.repo)
        status = Checkout(path, branch=self.branch, version=self.version)
        if status[0]:
            if self.tools is None and self.overrides is None:
                # get all tools
                matches = AvailableTools(
                    path, branch=self.branch, version=self.version,
                    core=self.core)
            elif self.tools is None:
                # there's only something in overrides
                # grab all the tools then apply overrides
                matches = AvailableTools(
                    path, branch=self.branch, version=self.version,
                    core=self.core)
                # !! TODO apply overrides to matches
            elif self.overrides is None:
                # there's only something in tools
                # only grab the tools specified
                matches = ToolMatches(tools=self.tools,
                                      version=self.version)
            else:
                # both tools and overrides were specified
                # grab only the tools specified, with the overrides applied
                o_matches = ToolMatches(tools=self.tools,
                                        version=self.version)
                matches = o_matches
                for override in self.overrides:
                    override_t = None
                    if override[0] == '.':
                        # '.' means the repo root tool
                        override_t = ('', override[1])
                    else:
                        override_t = override
                    for match in o_matches:
                        if override_t[0] == match[0]:
                            matches.remove(match)
                    matches.append(override_t)
            status = (True, matches)
        return status

    def _build_manifest(self, match, template, repo, org, name, commit_id):
        """ Write/refresh the manifest section for one tool match, deriving
        its section name, image name, link name, groups, and commit history.
        Returns (status, template, match_path, image_name, section). """
        status = (True, None)
        # keep track of whether or not to write an additional manifest
        # entry for multiple instances, and how many additional entries
        # to write
        addtl_entries = 1
        # remove @ in match for template setting purposes
        if match[0].find('@') >= 0:
            true_name = match[0].split('@')[1]
        else:
            true_name = match[0]
        # TODO check for special settings here first for the specific match
        self.version = match[1]
        section = org + ':' + name + ':' + true_name + ':'
        section += self.branch + ':' + self.version
        # need to get rid of temp identifiers for tools in same repo
        match_path = repo + match[0].split('@')[0]
        if self.image_name:
            image_name = self.image_name
        elif not self.core:
            image_name = org + '/' + name
            if match[0] != '':
                # if tool is in a subdir, add that to the name of the
                # image
                image_name += '-' + '-'.join(match[0].split('/')[1:])
            image_name += ':' + self.branch
        else:
            image_name = ('cyberreboot/vent-' +
                          match[0].split('/')[-1] + ':' + self.branch)
        image_name = image_name.replace('_', '-')
        # check if the section already exists
        is_there, options = template.section(section)
        previous_commit = None
        previous_commits = None
        head = False
        if is_there:
            for option in options:
                # TODO check if tool name but a different version
                # exists - then disable/remove if set
                if option[0] == 'version' and option[1] == 'HEAD':
                    head = True
                if option[0] == 'built' and option[1] == 'yes':
                    # !! TODO remove pre-existing image
                    pass
                if option[0] == 'commit_id':
                    previous_commit = option[1]
                if option[0] == 'previous_versions':
                    previous_commits = option[1]
        # check if tool comes from multi directory
        multi_tool = 'no'
        if match[0].find('@') >= 0:
            multi_tool = 'yes'
        # !! TODO
        # check if section should be removed from config i.e. all tools
        # but new commit removed one that was in a previous commit
        image_name = image_name.lower()
        image_name = image_name.replace('@', '-')
        # special case for vent images
        if image_name.startswith('cyberreboot/vent'):
            image_name = image_name.replace('vent-vent-core-', 'vent-')
            image_name = image_name.replace('vent-vent-extras-', 'vent-')
        # set template section & options for tool at version and branch
        template.add_section(section)
        template.set_option(section, 'name', true_name.split('/')[-1])
        template.set_option(section, 'namespace', org + '/' + name)
        template.set_option(section, 'path', match_path)
        template.set_option(section, 'repo', self.repo)
        template.set_option(section, 'multi_tool', multi_tool)
        template.set_option(section, 'branch', self.branch)
        template.set_option(section, 'version', self.version)
        template.set_option(section, 'last_updated',
                            str(datetime.utcnow()) + ' UTC')
        template.set_option(section, 'image_name', image_name)
        template.set_option(section, 'type', 'repository')
        # save settings in vent.template to plugin_manifest
        # watch for multiple tools in same directory
        # just wanted to store match path with @ for path for use in
        # other actions
        tool_template = 'vent.template'
        if match[0].find('@') >= 0:
            tool_template = match[0].split('@')[1] + '.template'
        vent_template_path = join(match_path, tool_template)
        if exists(vent_template_path):
            with open(vent_template_path, 'r') as f:
                vent_template_val = f.read()
        else:
            vent_template_val = ''
        settings_dict = ParsedSections(vent_template_val)
        for setting in settings_dict:
            template.set_option(section, setting,
                                json.dumps(settings_dict[setting]))
        # TODO do we need this if we save as a dictionary?
        vent_template = Template(vent_template_path)
        vent_status, response = vent_template.option('info', 'name')
        instances = vent_template.option('settings', 'instances')
        if instances[0]:
            addtl_entries = int(instances[1])
        if vent_status:
            template.set_option(section, 'link_name', response)
        else:
            template.set_option(section, 'link_name',
                                true_name.split('/')[-1])
        if self.version == 'HEAD':
            template.set_option(section, 'commit_id', commit_id)
        if head:
            # no need to store previous commits if not HEAD, since
            # the version will always be the same commit ID
            if previous_commit and previous_commit != commit_id:
                if (previous_commits and
                        previous_commit not in previous_commits):
                    previous_commits = (previous_commit +
                                        ',' +
                                        previous_commits)
                elif not previous_commits:
                    previous_commits = previous_commit
            if previous_commits and previous_commits != commit_id:
                template.set_option(section,
                                    'previous_versions',
                                    previous_commits)
        groups = vent_template.option('info', 'groups')
        if groups[0]:
            template.set_option(section, 'groups', groups[1])
        # set groups to empty string if no groups defined for tool
        else:
            template.set_option(section, 'groups', '')
        # write additional entries for multiple instances
        if addtl_entries > 1:
            # add 2 for naming conventions
            for i in range(2, addtl_entries + 1):
                addtl_section = section.rsplit(':', 2)
                addtl_section[0] += str(i)
                addtl_section = ':'.join(addtl_section)
                template.add_section(addtl_section)
                orig_vals = template.section(section)[1]
                for val in orig_vals:
                    template.set_option(addtl_section, val[0], val[1])
                template.set_option(addtl_section, 'name',
                                    true_name.split('/')[-1]+str(i))
        return status, template, match_path, image_name, section

    def _build_image(self, template, match_path, image_name, section,
                     build_local=False):
        """ Obtain the image for one tool — reuse an existing image, pull it,
        or build it from its Dockerfile — and record the result in the
        manifest. Returns (status, template). """
        status = (True, None)
        output = ''

        def set_instances(template, section, built, image_id=None):
            """ Set build information for multiple instances """
            i = 2
            # walk numbered instance sections until one is missing
            while True:
                addtl_section = section.rsplit(':', 2)
                addtl_section[0] += str(i)
                addtl_section = ':'.join(addtl_section)
                if template.section(addtl_section)[0]:
                    template.set_option(addtl_section, 'built', built)
                    if image_id:
                        template.set_option(addtl_section, 'image_id',
                                            image_id)
                    template.set_option(addtl_section, 'last_updated',
                                        Timestamp())
                else:
                    break
                i += 1
        # determine whether a tool should be considered a multi instance
        multi_instance = False
        try:
            settings = template.option(section, 'settings')
            if settings[0]:
                settings_dict = json.loads(settings[1])
                if 'instances' in settings_dict and int(settings_dict['instances']) > 1:
                    multi_instance = True
        except Exception as e:  # pragma: no cover
            self.logger.error(
                'Failed to check for multi instance because: {0}'.format(str(e)))
            status = (False, str(e))
        cwd = getcwd()
        # docker build uses path='.', so cd into the tool directory
        chdir(match_path)
        try:
            name = template.option(section, 'name')
            groups = template.option(section, 'groups')
            t_type = template.option(section, 'type')
            path = template.option(section, 'path')
            status, config_override = self.path_dirs.override_config(path[1])
            if groups[1] == '' or not groups[0]:
                groups = (True, 'none')
            if not name[0]:
                name = (True, image_name)
            pull = False
            image_exists = False
            cfg_template = Template(template=self.path_dirs.cfg_file)
            use_existing_image = False
            result = cfg_template.option('build-options',
                                         'use_existing_images')
            if result[0]:
                use_existing_image = result[1]
            if use_existing_image == 'yes' and not config_override:
                # reuse a local image if one with this name already exists
                try:
                    self.d_client.images.get(image_name)
                    i_attrs = self.d_client.images.get(image_name).attrs
                    image_id = i_attrs['Id'].split(':')[1][:12]
                    template.set_option(section, 'built', 'yes')
                    template.set_option(section, 'image_id', image_id)
                    template.set_option(section, 'last_updated',
                                        str(datetime.utcnow()) + ' UTC')
                    # set other instances too
                    if multi_instance:
                        set_instances(template, section, 'yes', image_id)
                    status = (True, 'Found {0}'.format(image_name))
                    self.logger.info(str(status))
                    image_exists = True
                except docker.errors.ImageNotFound:
                    image_exists = False
                except Exception as e:  # pragma: no cover
                    self.logger.warning(
                        'Failed to query Docker for images because: {0}'.format(str(e)))
            if not image_exists:
                # pull if '/' in image_name, fallback to build
                if '/' in image_name and not build_local and not config_override:
                    try:
                        image = self.d_client.images.pull(image_name)
                        i_attrs = self.d_client.images.get(
                            image_name).attrs
                        image_id = i_attrs['Id'].split(':')[1][:12]
                        if image_id:
                            template.set_option(section, 'built', 'yes')
                            template.set_option(section, 'image_id',
                                                image_id)
                            template.set_option(section, 'last_updated',
                                                str(datetime.utcnow()) +
                                                ' UTC')
                            # set other instances too
                            if multi_instance:
                                set_instances(template, section, 'yes',
                                              image_id)
                            status = (True, 'Pulled {0}'.format(image_name))
                            self.logger.info(str(status))
                        else:
                            template.set_option(section, 'built', 'failed')
                            template.set_option(section, 'last_updated',
                                                str(datetime.utcnow()) +
                                                ' UTC')
                            # set other instances too
                            if multi_instance:
                                set_instances(template, section, 'failed')
                            status = (False, 'Failed to pull image {0}'.format(
                                str(output.split('\n')[-1])))
                            self.logger.warning(str(status))
                        pull = True
                    except Exception as e:  # pragma: no cover
                        self.logger.warning(
                            'Failed to pull image, going to build instead: {0}'.format(str(e)))
                        status = (
                            False,
                            'Failed to pull image because: {0}'.format(str(e)))
            if not pull and not image_exists:
                # get username to label built image with
                username = getpass.getuser()
                # see if additional file arg needed for building multiple
                # images from same directory
                file_tag = 'Dockerfile'
                multi_tool = template.option(section, 'multi_tool')
                if multi_tool[0] and multi_tool[1] == 'yes':
                    specific_file = template.option(section, 'name')[1]
                    if specific_file != 'unspecified':
                        file_tag = 'Dockerfile.' + specific_file
                # update image name with new version for update
                image_name = image_name.rsplit(':', 1)[0]+':'+self.branch
                labels = {}
                labels['vent'] = ''
                labels['vent.section'] = section
                labels['vent.repo'] = self.repo
                labels['vent.type'] = t_type[1]
                labels['vent.name'] = name[1]
                labels['vent.groups'] = groups[1]
                labels['built-by'] = username
                image = self.d_client.images.build(path='.',
                                                   dockerfile=file_tag,
                                                   tag=image_name,
                                                   labels=labels, rm=True)
                image_id = image[0].id.split(':')[1][:12]
                template.set_option(section, 'built', 'yes')
                template.set_option(section, 'image_id', image_id)
                template.set_option(section, 'last_updated',
                                    str(datetime.utcnow()) + ' UTC')
                # set other instances too
                if multi_instance:
                    set_instances(template, section, 'yes', image_id)
                status = (True, 'Built {0}'.format(image_name))
        except Exception as e:  # pragma: no cover
            self.logger.error('Unable to build image {0} because: {1} | {2}'.format(
                str(image_name), str(e), str(output)))
            template.set_option(section, 'built', 'failed')
            template.set_option(section, 'last_updated',
                                str(datetime.utcnow()) + ' UTC')
            if multi_instance:
                set_instances(template, section, 'failed')
            status = (
                False, 'Failed to build image because: {0}'.format(str(e)))
        chdir(cwd)
        template.set_option(section, 'running', 'no')
        return status, template

    def _clone(self, user=None, pw=None):
        """ Clone self.repo into its plugins path, skipping the clone if the
        path already exists; user/pw enable https auth for private repos.
        Returns a status tuple with any credentials scrubbed from errors. """
        status = (True, None)
        try:
            # if path already exists, ignore
            try:
                path, _, _ = self.path_dirs.get_path(self.repo)
                chdir(path)
                return status
            except Exception as e:  # pragma: no cover
                self.logger.debug("Repo doesn't exist, attempting to clone.")
            status = self.path_dirs.apply_path(self.repo)
            if not status[0]:
                self.logger.error(
                    'Unable to clone because: {0}'.format(str(status[1])))
                return status
            repo = self.repo
            # check if user and pw were supplied, typically for private repos
            if user and pw:
                # only https is supported when using user/pw
                auth_repo = 'https://' + user + ':' + pw + '@'
                repo = auth_repo + repo.split('https://')[-1]
            # clone repo
            check_output(shlex.split('env GIT_SSL_NO_VERIFY=true git clone --recursive ' + repo + ' .'),
                         stderr=STDOUT,
                         close_fds=True).decode('utf-8')
            chdir(status[1])
            status = (True, 'Successfully cloned: {0}'.format(self.repo))
        except Exception as e:  # pragma: no cover
            e_str = str(e)
            # scrub username and password from error message
            if e_str.find('@') >= 0:
                e_str = e_str[:e_str.find('//') + 2] + \
                    e_str[e_str.find('@') + 1:]
            self.logger.error('Clone failed with error: {0}'.format(e_str))
            status = (False, e_str)
        return status

    def update(self, repo, tools=None):
        # TODO
        return
class Action:
    """ Handle actions in menu """
    # NOTE(review): every public method follows the same contract: it returns a
    # `status` tuple of (bool success, payload-or-Exception) and logs
    # "Starting:"/"Finished:" markers around the work.

    def __init__(self, **kargs):
        # Plugin owns the docker client and path bookkeeping; Action reuses them.
        self.plugin = Plugin(**kargs)
        self.d_client = self.plugin.d_client
        self.vent_config = os.path.join(self.plugin.path_dirs.meta_dir,
                                        "vent.cfg")
        self.logger = Logger(__name__)

    def add(self, repo, tools=None, overrides=None, version="HEAD",
            branch="master", build=True, user=None, pw=None, groups=None,
            version_alias=None, wild=None, remove_old=True, disable_old=True):
        """ Add a new set of tool(s) """
        # Thin wrapper around Plugin.add; all arguments are forwarded verbatim.
        self.logger.info("Starting: add")
        status = (True, None)
        try:
            status = self.plugin.add(repo,
                                     tools=tools,
                                     overrides=overrides,
                                     version=version,
                                     branch=branch,
                                     build=build,
                                     user=user,
                                     pw=pw,
                                     groups=groups,
                                     version_alias=version_alias,
                                     wild=wild,
                                     remove_old=remove_old,
                                     disable_old=disable_old)
        except Exception as e:  # pragma: no cover
            self.logger.error("add failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of add: "+str(status))
        self.logger.info("Finished: add")
        return status

    def remove(self, repo=None, namespace=None, name=None, groups=None,
               enabled="yes", branch="master", version="HEAD", built="yes"):
        """ Remove tools or a repo """
        # Thin wrapper around Plugin.remove; constraints narrow which tools go.
        self.logger.info("Starting: remove")
        status = (True, None)
        try:
            status = self.plugin.remove(name=name,
                                        repo=repo,
                                        namespace=namespace,
                                        groups=groups,
                                        enabled=enabled,
                                        branch=branch,
                                        version=version,
                                        built=built)
        except Exception as e:
            self.logger.error("remove failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of remove: " + str(status))
        self.logger.info("Finished: remove")
        return status

    def prep_start(self, repo=None, name=None, groups=None, enabled="yes",
                   branch="master", version="HEAD", run_build=False):
        """
        Start a set of tools that match the parameters given, if no
        parameters are given, start all installed tools on the master
        branch at verison HEAD that are enabled
        """
        # Builds (does not run) `tool_dict`: container-name -> kwargs suitable
        # for docker `containers.run`. The actual start happens in start().
        args = locals()
        self.logger.info("Starting: prep_start")
        self.logger.info("Arguments: "+str(args))
        status = (True, None)
        tool_dict = {}
        try:
            # locals() captured `run_build` too; it's not a tool constraint
            del args['run_build']
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            vent_config = Template(template=self.vent_config)
            files = vent_config.option('main', 'files')
            sections, template = self.plugin.constraint_options(args, options)
            for section in sections:
                # initialize needed vars
                template_path = os.path.join(sections[section]['path'],
                                             'vent.template')
                # container names may not contain ':' or '/'
                container_name = sections[section]['image_name'].replace(':','-')
                container_name = container_name.replace('/','-')
                image_name = sections[section]['image_name']

                # checkout the right version and branch of the repo
                self.plugin.branch = branch
                self.plugin.version = version
                cwd = os.getcwd()
                self.logger.info("current directory is: "+str(cwd))
                os.chdir(os.path.join(sections[section]['path']))
                status = self.plugin.checkout()
                self.logger.info(status)
                os.chdir(cwd)

                if run_build:
                    status = self.build(name=sections[section]['name'],
                                        groups=groups,
                                        enabled=enabled,
                                        branch=branch,
                                        version=version)
                    self.logger.info(status)

                # set docker settings for container
                vent_template = Template(template_path)
                status = vent_template.section('docker')
                self.logger.info(status)
                tool_dict[container_name] = {'image':image_name,
                                             'name':container_name}
                if status[0]:
                    for option in status[1]:
                        options = option[1]
                        # check for commands to evaluate
                        if '`' in options:
                            cmds = options.split('`')
                            # TODO this probably needs better error checking to handle mismatched ``
                            if len(cmds) > 2:
                                i = 1
                                while i < len(cmds):
                                    try:
                                        cmds[i] = subprocess.check_output(shlex.split(cmds[i]),
                                                                          stderr=subprocess.STDOUT,
                                                                          close_fds=True).strip()
                                    except Exception as e:  # pragma: no cover
                                        self.logger.error("unable to evaluate command specified in vent.template: "+str(e))
                                    i += 2
                            options = "".join(cmds)
                        # store options set for docker
                        try:
                            # literal_eval turns "['a','b']"/"{'k':'v'}" strings
                            # from vent.template into real Python values
                            tool_dict[container_name][option[0]] = ast.literal_eval(options)
                        except Exception as e:  # pragma: no cover
                            self.logger.error("unable to store the options set for docker: "+str(e))
                            # fall back to the raw string when it isn't a literal
                            tool_dict[container_name][option[0]] = options

                # get temporary name for links, etc.
                status = vent_template.section('info')
                self.logger.info(status)
                plugin_config = Template(template=self.plugin.manifest)
                status, plugin_sections = plugin_config.sections()
                self.logger.info(status)
                for plugin_section in plugin_sections:
                    status = plugin_config.option(plugin_section, "link_name")
                    self.logger.info(status)
                    image_status = plugin_config.option(plugin_section, "image_name")
                    self.logger.info(image_status)
                    if status[0] and image_status[0]:
                        cont_name = image_status[1].replace(':','-')
                        cont_name = cont_name.replace('/','-')
                        if cont_name not in tool_dict:
                            # placeholder entry so links can resolve; it is not
                            # started unless requested ('start': False)
                            tool_dict[cont_name] = {'image':image_status[1],
                                                    'name':cont_name,
                                                    'start':False}
                        tool_dict[cont_name]['tmp_name'] = status[1]

                # add extra labels
                if 'labels' not in tool_dict[container_name]:
                    tool_dict[container_name]['labels'] = {}
                tool_dict[container_name]['labels']['vent'] = Version()
                tool_dict[container_name]['labels']['vent.namespace'] = sections[section]['namespace']
                tool_dict[container_name]['labels']['vent.branch'] = branch
                tool_dict[container_name]['labels']['vent.version'] = version
                tool_dict[container_name]['labels']['vent.name'] = sections[section]['name']

                if 'groups' in sections[section]:
                    # add labels for groups
                    tool_dict[container_name]['labels']['vent.groups'] = sections[section]['groups']
                    # send logs to syslog
                    # NOTE(review): when a tool is both non-syslog and 'core',
                    # the first assignment ('core' tag) is immediately
                    # overwritten by the second ('plugin' tag) — TODO confirm
                    # the second `if` was not meant to be `elif`
                    if 'syslog' not in sections[section]['groups'] and 'core' in sections[section]['groups']:
                        tool_dict[container_name]['log_config'] = {'type':'syslog',
                                                                   'config': {'syslog-address':'tcp://0.0.0.0:514',
                                                                              'syslog-facility':'daemon',
                                                                              'tag':'core'}}
                    if 'syslog' not in sections[section]['groups']:
                        tool_dict[container_name]['log_config'] = {'type':'syslog',
                                                                   'config': {'syslog-address':'tcp://0.0.0.0:514',
                                                                              'syslog-facility':'daemon',
                                                                              'tag':'plugin'}}
                    # mount necessary directories
                    if 'files' in sections[section]['groups']:
                        if 'volumes' in tool_dict[container_name]:
                            tool_dict[container_name]['volumes'][self.plugin.path_dirs.base_dir[:-1]] = {'bind': '/vent', 'mode': 'ro'}
                        else:
                            tool_dict[container_name]['volumes'] = {self.plugin.path_dirs.base_dir[:-1]: {'bind': '/vent', 'mode': 'ro'}}
                        if files[0]:
                            tool_dict[container_name]['volumes'][files[1]] = {'bind': '/files', 'mode': 'ro'}
                else:
                    # no groups at all: default to plugin-tagged syslog
                    tool_dict[container_name]['log_config'] = {'type':'syslog',
                                                               'config': {'syslog-address':'tcp://0.0.0.0:514',
                                                                          'syslog-facility':'daemon',
                                                                          'tag':'plugin'}}

                # add label for priority
                status = vent_template.section('settings')
                self.logger.info(status)
                if status[0]:
                    for option in status[1]:
                        if option[0] == 'priority':
                            tool_dict[container_name]['labels']['vent.priority'] = option[1]

                # only start tools that have been built
                if sections[section]['built'] != 'yes':
                    del tool_dict[container_name]

            # check and update links, volumes_from, network_mode
            # (resolve the tmp_name placeholders to real container names)
            for container in tool_dict.keys():
                if 'links' in tool_dict[container]:
                    for link in tool_dict[container]['links']:
                        for c in tool_dict.keys():
                            if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == link:
                                tool_dict[container]['links'][tool_dict[c]['name']] = tool_dict[container]['links'].pop(link)
                if 'volumes_from' in tool_dict[container]:
                    tmp_volumes_from = tool_dict[container]['volumes_from']
                    tool_dict[container]['volumes_from'] = []
                    for volumes_from in list(tmp_volumes_from):
                        for c in tool_dict.keys():
                            if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == volumes_from:
                                tool_dict[container]['volumes_from'].append(tool_dict[c]['name'])
                                tmp_volumes_from.remove(volumes_from)
                    # keep any entries that didn't match a tmp_name as-is
                    tool_dict[container]['volumes_from'] += tmp_volumes_from
                if 'network_mode' in tool_dict[container]:
                    if tool_dict[container]['network_mode'].startswith('container:'):
                        network_c_name = tool_dict[container]['network_mode'].split('container:')[1]
                        for c in tool_dict.keys():
                            if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == network_c_name:
                                tool_dict[container]['network_mode'] = 'container:'+tool_dict[c]['name']

            # remove tmp_names
            # NOTE(review): deleting keys/entries while iterating .keys() raises
            # RuntimeError on Python 3 — looks written for Python 2; TODO
            # confirm and iterate over list(tool_dict) if on Python 3
            for c in tool_dict.keys():
                if 'tmp_name' in tool_dict[c]:
                    del tool_dict[c]['tmp_name']

            # remove containers that shouldn't be started
            for c in tool_dict.keys():
                if 'start' in tool_dict[c] and not tool_dict[c]['start']:
                    del tool_dict[c]
        except Exception as e:
            self.logger.error("prep_start failed with error: "+str(e))
            status = (False, e)
        # NOTE(review): this unconditionally overwrites the failure status set
        # in the except branch above, so prep_start always reports success —
        # TODO confirm whether this line belongs inside the try block
        status = (True, tool_dict)
        self.logger.info("Status of prep_start: "+str(status))
        self.logger.info("Finished: prep_start")
        return status

    def start(self, tool_dict):
        """
        Start a set of tools that match the parameters given, if no
        parameters are given, start all installed tools on the master
        branch at verison HEAD that are enabled
        """
        # Consumes the dict produced by prep_start. Containers with a
        # 'vent.priority' label start first, grouped by their 'vent.groups'
        # value in alphabetical group order; the rest start afterwards.
        self.logger.info("Starting: start")
        status = (True, None)
        try:
            # check start priorities (priority of groups is alphabetical for now)
            group_orders = {}
            groups = []
            containers_remaining = []
            for container in tool_dict:
                containers_remaining.append(container)
                if 'labels' in tool_dict[container]:
                    if 'vent.groups' in tool_dict[container]['labels']:
                        groups += tool_dict[container]['labels']['vent.groups'].split(',')
                        if 'vent.priority' in tool_dict[container]['labels']:
                            # one priority per group, positionally matched
                            priorities = tool_dict[container]['labels']['vent.priority'].split(',')
                            container_groups = tool_dict[container]['labels']['vent.groups'].split(',')
                            for i, priority in enumerate(priorities):
                                if container_groups[i] not in group_orders:
                                    group_orders[container_groups[i]] = []
                                group_orders[container_groups[i]].append((int(priority), container))
                            containers_remaining.remove(container)

            # start containers based on priorities
            groups = sorted(set(groups))
            started_containers = []
            for group in groups:
                if group in group_orders:
                    for container_tuple in sorted(group_orders[group]):
                        if container_tuple[1] not in started_containers:
                            started_containers.append(container_tuple[1])
                            try:
                                try:
                                    # restart an existing container if there is one
                                    container = self.d_client.containers.get(container_tuple[1])
                                    container.start()
                                    self.logger.info("started "+str(container_tuple[1])+" with ID: "+str(container.short_id))
                                except Exception as err:  # pragma: no cover
                                    # no existing container: create and run one
                                    self.logger.error(str(err))
                                    container_id = self.d_client.containers.run(detach=True,
                                                                                **tool_dict[container_tuple[1]])
                                    self.logger.info("started "+str(container_tuple[1])+" with ID: "+str(container_id))
                            except Exception as e:  # pragma: no cover
                                self.logger.error("failed to start "+str(container_tuple[1])+" because: "+str(e))

            # start the rest of the containers that didn't have any priorities set
            for container in containers_remaining:
                try:
                    try:
                        c = self.d_client.containers.get(container)
                        c.start()
                        self.logger.info("started "+str(container)+" with ID: "+str(c.short_id))
                    except Exception as err:  # pragma: no cover
                        self.logger.error(str(err))
                        container_id = self.d_client.containers.run(detach=True,
                                                                    **tool_dict[container])
                        self.logger.info("started "+str(container)+" with ID: "+str(container_id))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to start "+str(container)+" because: "+str(e))
        except Exception as e:
            self.logger.error("start failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of start: "+str(status))
        self.logger.info("Finished: start")
        return status

    def update(self, repo=None, name=None, groups=None, enabled="yes",
               branch="master", version="HEAD"):
        """
        Update a set of tools that match the parameters given, if no
        parameters are given, updated all installed tools on the master
        branch at verison HEAD that are enabled
        """
        args = locals()
        self.logger.info("Starting: update")
        self.logger.info(args)
        status = (True, None)
        try:
            options = ['path', 'image_name', 'image_id']
            sections, template = self.plugin.constraint_options(args, options)
            # get existing containers and images and states
            running_containers = Containers()
            built_images = Images()
            self.logger.info("running docker containers: "+str(running_containers))
            self.logger.info("built docker images: "+str(built_images))
            # if repo, pull and build
            # if registry image, pull
            for section in sections:
                try:
                    cwd = os.getcwd()
                    self.logger.info("current working directory: "+str(cwd))
                    os.chdir(sections[section]['path'])
                    self.plugin.version = version
                    self.plugin.branch = branch
                    c_status = self.plugin.checkout()
                    self.logger.info(c_status)
                    try:
                        # restore the original working directory even if the
                        # rest of this section's update fails
                        os.chdir(cwd)
                    except Exception as e:  # pragma: no cover
                        self.logger.error("unable to change directory: "+str(e))
                        pass
                    template = self.plugin.builder(template,
                                                   sections[section]['path'],
                                                   sections[section]['image_name'],
                                                   section,
                                                   build=True,
                                                   branch=branch,
                                                   version=version)
                    self.logger.info(template)
                    # stop and remove old containers and images if image_id updated
                    # !! TODO
                    # start containers if they were running
                    # !! TODO
                    # TODO logging
                except Exception as e:  # pragma: no cover
                    self.logger.error("unable to update: "+str(section)+" because: "+str(e))
            template.write_config()
        except Exception as e:
            self.logger.error("update failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of update: "+str(status))
        self.logger.info("Finished: update")
        return status

    def stop(self, repo=None, name=None, groups=None, enabled="yes",
             branch="master", version="HEAD"):
        """
        Stop a set of tools that match the parameters given, if no
        parameters are given, stop all installed tools on the master
        branch at verison HEAD that are enabled
        """
        args = locals()
        self.logger.info("Starting: stop")
        self.logger.info(args)
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                # container name is the image name with ':' and '/' mapped to '-'
                container_name = sections[section]['image_name'].replace(':','-')
                container_name = container_name.replace('/','-')
                try:
                    container = self.d_client.containers.get(container_name)
                    container.stop()
                    self.logger.info("stopped "+str(container_name))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to stop "+str(container_name)+" because: "+str(e))
        except Exception as e:
            self.logger.error("stop failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of stop: "+str(status))
        self.logger.info("Finished: stop")
        return status

    def clean(self, repo=None, name=None, groups=None, enabled="yes",
              branch="master", version="HEAD"):
        """
        Clean (stop and remove) a set of tools that match the parameters
        given, if no parameters are given, clean all installed tools on the
        master branch at verison HEAD that are enabled
        """
        args = locals()
        self.logger.info("Starting: clean")
        self.logger.info(args)
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                container_name = sections[section]['image_name'].replace(':','-')
                container_name = container_name.replace('/','-')
                try:
                    container = self.d_client.containers.get(container_name)
                    # force=True stops the container first if it is running
                    container.remove(force=True)
                    self.logger.info("cleaned "+str(container_name))
                except Exception as e:  # pragma: no cover
                    self.logger.error("failed to clean "+str(container_name)+" because: "+str(e))
        except Exception as e:
            self.logger.error("clean failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of clean: "+ str(status))
        self.logger.info("Finished: clean")
        return status

    def build(self, repo=None, name=None, groups=None, enabled="yes",
              branch="master", version="HEAD"):
        """ Build a set of tools that match the parameters given """
        args = locals()
        self.logger.info("Starting: build")
        self.logger.info(args)
        status = (True, None)
        try:
            options = ['image_name', 'path']
            sections, template = self.plugin.constraint_options(args, options)
            self.logger.info(sections)
            for section in sections:
                self.logger.info("Building "+str(section)+" ...")
                template = self.plugin.builder(template,
                                               sections[section]['path'],
                                               sections[section]['image_name'],
                                               section,
                                               build=True,
                                               branch=branch,
                                               version=version)
            template.write_config()
        except Exception as e:
            self.logger.error("build failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of build: "+str(status))
        self.logger.info("Finished: build")
        return status

    def cores(self, action, branch="master"):
        """
        Supply action (install, build, start, stop, clean) for core tools
        """
        self.logger.info("Starting: cores")
        status = (True, None)
        try:
            self.logger.info("action provided: "+str(action))
            core = Core(branch=branch)
            if action in ["install", "build"]:
                tools = []
                plugins = Plugin(plugins_dir=".internals/plugins")
                plugins.version = 'HEAD'
                plugins.branch = branch
                plugins.apply_path('https://github.com/cyberreboot/vent')
                response = plugins.checkout()
                self.logger.info("status of plugin checkout "+str(response))
                matches = plugins._available_tools(groups='core')
                for match in matches:
                    tools.append((match[0], ''))
                status = plugins.add('https://github.com/cyberreboot/vent',
                                     tools=tools,
                                     branch=branch,
                                     build=False)
                self.logger.info("status of plugin add: "+str(status))
                plugin_config = Template(template=self.plugin.manifest)
                sections = plugin_config.sections()
                # point each core tool's image_name at the official image
                for tool in core['normal']:
                    for section in sections[1]:
                        name = plugin_config.option(section, "name")
                        orig_branch = plugin_config.option(section, "branch")
                        namespace = plugin_config.option(section, "namespace")
                        version = plugin_config.option(section, "version")
                        if name[1] == tool and orig_branch[1] == branch and namespace[1] == "cyberreboot/vent" and version[1] == "HEAD":
                            plugin_config.set_option(section,
                                                     "image_name",
                                                     "cyberreboot/vent-"+tool+":"+branch)
                plugin_config.write_config()
            if action == "build":
                plugin_config = Template(template=self.plugin.manifest)
                sections = plugin_config.sections()
                try:
                    for tool in core['normal']:
                        for section in sections[1]:
                            image_name = plugin_config.option(section, "image_name")
                            if image_name[1] == "cyberreboot/vent-"+tool+":"+branch:
                                try:
                                    # currently can't use docker-py because it
                                    # returns a 404 on pull so no way to valid if it
                                    # worked or didn't
                                    # image_id = self.d_client.images.pull('cyberreboot/vent-'+tool, tag=branch)
                                    image_id = None
                                    output = subprocess.check_output(shlex.split("docker pull cyberreboot/vent-"+tool+":"+branch),
                                                                     stderr=subprocess.STDOUT)
                                    # scrape the pulled image digest from the CLI output
                                    for line in output.split('\n'):
                                        if line.startswith("Digest: sha256:"):
                                            image_id = line.split("Digest: sha256:")[1][:12]
                                    if image_id:
                                        plugin_config.set_option(section, "built", "yes")
                                        plugin_config.set_option(section, "image_id", image_id)
                                        plugin_config.set_option(section,
                                                                 "last_updated",
                                                                 str(datetime.datetime.utcnow()) + " UTC")
                                        status = (True, "Pulled "+tool)
                                        self.logger.info(str(status))
                                    else:
                                        plugin_config.set_option(section, "built", "failed")
                                        plugin_config.set_option(section,
                                                                 "last_updated",
                                                                 str(datetime.datetime.utcnow()) + " UTC")
                                        status = (False, "Failed to pull image "+str(output.split('\n')[-1]))
                                        self.logger.error(str(status))
                                except Exception as e:  # pragma: no cover
                                    plugin_config.set_option(section, "built", "failed")
                                    plugin_config.set_option(section,
                                                             "last_updated",
                                                             str(datetime.datetime.utcnow()) + " UTC")
                                    status = (False, "Failed to pull image "+str(e))
                                    self.logger.error(str(status))
                except Exception as e:  # pragma: no cover
                    status = (False, "Failed to pull images "+str(e))
                    self.logger.error(str(status))
                plugin_config.write_config()
            elif action == "start":
                status = self.prep_start(groups="core", branch=branch)
                if status[0]:
                    tool_dict = status[1]
                    status = self.start(tool_dict)
            elif action == "stop":
                status = self.stop(groups="core", branch=branch)
            elif action == "clean":
                status = self.clean(groups="core", branch=branch)
        except Exception as e:
            self.logger.info("core failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of core: "+str(status))
        self.logger.info("Finished: core")
        return status

    @staticmethod
    def backup():
        # TODO
        return

    @staticmethod
    def restore():
        # TODO
        return

    @staticmethod
    def configure():
        # TODO
        # tools, core, etc.
        return

    @staticmethod
    def system_commands():
        # reset, upgrade, etc.
        return

    def logs(self, container_type=None, grep_list=None):
        """ generically filter logs stored in log containers """
        # Returns {container_name: [log lines]} for all vent-labeled
        # containers; grep_list narrows lines, container_type narrows
        # containers by their 'vent.groups' label.
        self.logger.info("Starting: logs")
        status = (True, None)
        log_entries = {}
        try:
            containers = self.d_client.containers.list(all=True,
                                                       filters={'label':'vent'})
            self.logger.info("containers found: "+str(containers))
            if grep_list:
                compare_containers = containers
                if container_type:
                    try:
                        compare_containers = [c for c in containers if (container_type in c.attrs['Config']['Labels']['vent.groups'])]
                    except Exception as e:  # pragma: no cover
                        self.logger.error("Unable to limit containers by container_type: "+str(container_type)+" because: "+str(e))
                for expression in grep_list:
                    for container in compare_containers:
                        try:
                            # 'logs' stores each line which contains the expression
                            logs = [log for log in container.logs().split("\n") if expression in log]
                            for log in logs:
                                if str(container.name) in log_entries:
                                    log_entries[str(container.name)].append(log)
                                else:
                                    log_entries[str(container.name)] = [log]
                        except Exception as e:  # pragma: no cover
                            self.logger.error("Unable to get logs for "+str(container.name)+" because: "+str(e))
            else:
                compare_containers = containers
                if container_type:
                    try:
                        compare_containers = [c for c in containers if (container_type in c.attrs['Config']['Labels']['vent.groups'])]
                    except Exception as e:  # pragma: no cover
                        self.logger.error("Unable to limit containers by container_type: "+str(container_type)+" because: "+str(e))
                for container in compare_containers:
                    try:
                        logs = container.logs().split("\n")
                        for log in logs:
                            if str(container.name) in log_entries:
                                log_entries[str(container.name)].append(log)
                            else:
                                log_entries[str(container.name)] = [log]
                    except Exception as e:  # pragma: no cover
                        self.logger.error("Unable to get logs for "+str(container.name)+" because: "+str(e))
            status = (True, log_entries)
        except Exception as e:
            self.logger.error("logs failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of logs: "+str(status))
        self.logger.info("Finished: logs")
        return status

    @staticmethod
    def help():
        # TODO
        return

    def inventory(self, choices=None):
        """ Return a dictionary of the inventory items and status """
        self.logger.info("Starting: inventory")
        status = (True, None)
        self.logger.info("choices specified: "+str(choices))
        try:
            # choices: repos, core, tools, images, built, running, enabled
            items = {'repos':[], 'core':[], 'tools':[], 'images':[],
                     'built':[], 'running':[], 'enabled':[]}
            tools = self.plugin.tools()
            self.logger.info("found tools: "+str(tools))
            for choice in choices:
                for tool in tools:
                    try:
                        if choice == 'repos':
                            if 'repo' in tool:
                                if tool['repo'] and tool['repo'] not in items[choice]:
                                    items[choice].append(tool['repo'])
                        elif choice == 'core':
                            if 'groups' in tool:
                                if 'core' in tool['groups']:
                                    items[choice].append((tool['section'], tool['name']))
                        elif choice == 'tools':
                            items[choice].append((tool['section'], tool['name']))
                        elif choice == 'images':
                            # TODO also check against docker
                            images = Images()
                            items[choice].append((tool['section'], tool['name'], tool['image_name']))
                        elif choice == 'built':
                            items[choice].append((tool['section'], tool['name'], tool['built']))
                        elif choice == 'running':
                            containers = Containers()
                            # NOTE(review): reusing `status` here shadows the
                            # method's status tuple until it is reset below —
                            # TODO confirm intended
                            status = 'not running'
                            for container in containers:
                                image_name = tool['image_name'].rsplit(":"+tool['version'], 1)[0]
                                image_name = image_name.replace(':', '-')
                                image_name = image_name.replace('/', '-')
                                if container[0] == image_name:
                                    status = container[1]
                            items[choice].append((tool['section'], tool['name'], status))
                        elif choice == 'enabled':
                            items[choice].append((tool['section'], tool['name'], tool['enabled']))
                        else:
                            # unknown choice
                            pass
                    except Exception as e:  # pragma: no cover
                        self.logger.error("unable to grab information about tool: "+str(tool)+" because: "+str(e))
                        pass
            status = (True, items)
        except Exception as e:
            self.logger.error("inventory failed with error: "+str(e))
            status = (False, e)
        self.logger.info("Status of inventory: "+str(status))
        self.logger.info("Finished: inventory")
        return status
def __init__(self, **kargs):
    """ Wire up the plugin handle, docker client, config path, and logger.

    All keyword arguments are forwarded untouched to Plugin.
    """
    self.plugin = Plugin(**kargs)
    # reuse the docker client the plugin already created
    self.d_client = self.plugin.d_client
    meta_dir = self.plugin.path_dirs.meta_dir
    self.vent_config = os.path.join(meta_dir, "vent.cfg")
    self.logger = Logger(__name__)
def __init__(self, **kargs):
    """ Set up path directories, manifest location, docker client, and logger.

    All keyword arguments are forwarded untouched to PathDirs.
    """
    self.path_dirs = PathDirs(**kargs)
    # the plugin manifest lives alongside the other vent metadata
    meta_dir = self.path_dirs.meta_dir
    self.manifest = os.path.join(meta_dir, "plugin_manifest.cfg")
    self.d_client = docker.from_env()
    self.logger = Logger(__name__)
class System: def __init__(self, *args, **kwargs): self.d_client = docker.from_env() self.path_dirs = PathDirs(**kwargs) self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg') self.vent_config = self.path_dirs.cfg_file self.startup_file = self.path_dirs.startup_file self.logger = Logger(__name__) self._auto_install() def _auto_install(self): """ Automatically detects images and installs them in the manifest if they are not there already """ template = Template(template=self.manifest) sections = template.sections() images = self.d_client.images.list(filters={'label': 'vent'}) add_sections = [] status = (True, None) for image in images: ignore = False if ('Labels' in image.attrs['Config'] and 'vent.section' in image.attrs['Config']['Labels'] and not image.attrs['Config']['Labels']['vent.section'] in sections[1]): section = image.attrs['Config']['Labels']['vent.section'] section_str = image.attrs['Config']['Labels']['vent.section'].split( ':') template.add_section(section) if 'vent.name' in image.attrs['Config']['Labels']: template.set_option(section, 'name', image.attrs['Config']['Labels']['vent.name']) if 'vent.repo' in image.attrs['Config']['Labels']: template.set_option(section, 'repo', image.attrs['Config']['Labels']['vent.repo']) git_path = join(self.path_dirs.plugins_dir, '/'.join(section_str[:2])) # TODO clone it down template.set_option(section, 'path', join( git_path, section_str[-3][1:])) # get template settings # TODO account for template files not named vent.template v_template = Template(template=join( git_path, section_str[-3][1:], 'vent.template')) tool_sections = v_template.sections() if tool_sections[0]: for s in tool_sections[1]: section_dict = {} options = v_template.options(s) if options[0]: for option in options[1]: option_name = option if option == 'name': # get link name template.set_option(section, 'link_name', v_template.option(s, option)[1]) option_name = 'link_name' opt_val = v_template.option(s, option)[1] 
section_dict[option_name] = opt_val if section_dict: template.set_option(section, s, json.dumps(section_dict)) if ('vent.type' in image.attrs['Config']['Labels'] and image.attrs['Config']['Labels']['vent.type'] == 'repository'): template.set_option( section, 'namespace', '/'.join(section_str[:2])) template.set_option(section, 'branch', section_str[-2]) template.set_option(section, 'version', section_str[-1]) template.set_option(section, 'last_updated', str( datetime.utcnow()) + ' UTC') if image.attrs['RepoTags']: template.set_option( section, 'image_name', image.attrs['RepoTags'][0]) else: # image with none tag is outdated, don't add it ignore = True template.set_option(section, 'type', 'repository') if 'vent.groups' in image.attrs['Config']['Labels']: template.set_option(section, 'groups', image.attrs['Config']['Labels']['vent.groups']) template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image.attrs['Id'].split(':')[1][:12]) template.set_option(section, 'running', 'no') # check if image is running as a container containers = self.d_client.containers.list( filters={'label': 'vent'}) for container in containers: if container.attrs['Image'] == image.attrs['Id']: template.set_option(section, 'running', 'yes') if not ignore: add_sections.append(section) template.write_config() # TODO this check will always be true, need to actually validate the above logic if status[0]: status = (True, add_sections) return status def backup(self): """ Saves the configuration information of the current running vent instance to be used for restoring at a later time """ status = (True, None) # initialize all needed variables (names for backup files, etc.) 
backup_name = ('.vent-backup-' + '-'.join(Timestamp().split(' '))) backup_dir = join(expanduser('~'), backup_name) backup_manifest = join(backup_dir, 'backup_manifest.cfg') backup_vcfg = join(backup_dir, 'backup_vcfg.cfg') manifest = self.manifest # create new backup directory try: mkdir(backup_dir) except Exception as e: # pragma: no cover self.logger.error(str(e)) return (False, str(e)) # create new files in backup directory try: # backup manifest with open(backup_manifest, 'w') as bmanifest: with open(manifest, 'r') as manifest_file: bmanifest.write(manifest_file.read()) # backup vent.cfg with open(backup_vcfg, 'w') as bvcfg: with open(self.vent_config, 'r') as vcfg_file: bvcfg.write(vcfg_file.read()) self.logger.info('Backup information written to ' + backup_dir) status = (True, backup_dir) except Exception as e: # pragma: no cover self.logger.error("Couldn't backup vent: " + str(e)) status = (False, str(e)) # TODO #266 return status def configure(self): # TODO return def gpu(self): # TODO return def history(self): # TODO #255 return def restore(self, backup_dir): """ Restores a vent configuration from a previously backed up version """ # TODO #266 status = (True, None) return status def reset(self): """ Factory reset all of Vent's user data, containers, and images """ status = (True, None) error_message = '' # remove containers try: c_list = set(self.d_client.containers.list( filters={'label': 'vent'}, all=True)) for c in c_list: c.remove(force=True) except Exception as e: # pragma: no cover error_message += 'Error removing Vent containers: ' + str(e) + '\n' # remove images try: i_list = set(self.d_client.images.list(filters={'label': 'vent'}, all=True)) for i in i_list: # delete tagged images only because they are the parents for # the untagged images. 
                # Remove the parents and the children get
                # removed automatically
                if i.attrs['RepoTags']:
                    self.d_client.images.remove(image=i.id, force=True)
        except Exception as e:  # pragma: no cover
            error_message += 'Error deleting Vent images: ' + str(e) + '\n'
        # remove .vent folder
        try:
            cwd = getcwd()
            # step out of ~/.vent before deleting it so rmtree doesn't pull
            # the working directory out from under the process
            if cwd.startswith(join(expanduser('~'), '.vent')):
                chdir(expanduser('~'))
            shutil.rmtree(join(expanduser('~'), '.vent'))
        except Exception as e:  # pragma: no cover
            error_message += 'Error deleting Vent data: ' + str(e) + '\n'
        if error_message:
            status = (False, error_message)
        return status

    def rollback(self):
        """ Roll Vent back to the previous state (not yet implemented) """
        # TODO #266
        return

    def start(self):
        """
        Stand up the vent host: create/join the 'vent' docker network bridge,
        leave the default bridge, then start tools from the startup file (or
        install and start core tools when no startup file exists).

        Returns:
            (bool, result) status tuple.
        """
        status = (True, None)
        vent_bridge = None
        # create vent network bridge if it doesn't already exist
        try:
            vent_bridge = self.d_client.networks.create(
                'vent', check_duplicate=True, driver='bridge')
        except docker.errors.APIError as e:  # pragma: no cover
            # only the name-conflict error is benign; anything else is fatal
            if str(e) != '409 Client Error: Conflict ("network with name vent already exists")':
                self.logger.error(
                    'Unable to create network bridge because: {0}'.format(str(e)))
                status = (False, str(e))
            else:
                vent_bridge = self.d_client.networks.list('vent')[0]
        if status[0]:
            # add vent to the vent network bridge
            # NOTE(review): assumes HOSTNAME env var equals this container's
            # id/name (true inside a docker container) — TODO confirm
            try:
                vent_bridge.connect(environ['HOSTNAME'])
            except Exception as e:  # pragma: no cover
                self.logger.error(
                    'Unable to connect vent to the network bridge because: {0}'.format(str(e)))
                status = (False, str(e))
        if status[0]:
            # remove vent from the default network bridge
            try:
                default_bridge = self.d_client.networks.list('bridge')[0]
                default_bridge.disconnect(environ['HOSTNAME'])
            except Exception as e:  # pragma: no cover
                self.logger.error(
                    'Unable to disconnect vent from the default network bridge because: {0}'.format(str(e)))
                status = (False, str(e))
        if status[0]:
            # startup based on startup file
            if exists(self.startup_file):
                status = self._startup()
            else:
                # no startup file: install core tools and start them
                tools = Tools()
                status = tools.new('core', None)
                if status[0]:
                    status = tools.start(
                        'https://github.com/cyberreboot/vent', None)
        return status

    def _startup(self):
        """
        Automatically detect if a startup file is specified and stand up a vent
        host with all necessary tools based on the specifications in that file
        """
        status = (True, None)
        try:
            s_dict = {}
            # rewrite the yml file to exclusively lowercase
            with open(self.startup_file, 'r') as sup:
                vent_startup = sup.read()
            with open(self.startup_file, 'w') as sup:
                # NOTE(review): this iterates CHARACTERS of the string, not
                # lines; net effect is writing vent_startup.lower()
                for line in vent_startup:
                    sup.write(line.lower())
            with open(self.startup_file, 'r') as sup:
                s_dict = yaml.safe_load(sup.read())
            if 'vent.cfg' in s_dict:
                # apply vent.cfg overrides from the startup file
                v_cfg = Template(self.vent_config)
                for section in s_dict['vent.cfg']:
                    for option in s_dict['vent.cfg'][section]:
                        # boolean from yaml indexes the ('no', 'yes') tuple
                        val = ('no', 'yes')[
                            s_dict['vent.cfg'][section][option]]
                        v_status = v_cfg.add_option(section, option, value=val)
                        if not v_status[0]:
                            v_cfg.set_option(section, option, val)
                v_cfg.write_config()
                del s_dict['vent.cfg']
            tool_d = {}
            extra_options = ['info', 'service', 'settings', 'docker', 'gpu']
            s_dict_c = copy.deepcopy(s_dict)
            # TODO check for repo or image type
            for repo in s_dict_c:
                repository = Repository(System().manifest)
                repository.repo = repo
                repository._clone()
                repo_path, org, r_name = self.path_dirs.get_path(repo)
                get_tools = []
                for tool in s_dict_c[repo]:
                    t_branch = 'master'
                    t_version = 'HEAD'
                    if 'branch' in s_dict[repo][tool]:
                        t_branch = s_dict[repo][tool]['branch']
                    if 'version' in s_dict[repo][tool]:
                        t_version = s_dict[repo][tool]['version']
                    get_tools.append((tool, t_branch, t_version))
                available_tools = AvailableTools(repo_path, tools=get_tools)
                for tool in s_dict_c[repo]:
                    # if we can't find the tool in that repo, skip over this
                    # tool and notify in the logs
                    t_path, t_path_cased = PathDirs.rel_path(
                        tool, available_tools)
                    if t_path is None:
                        self.logger.error("Couldn't find tool " + tool + ' in'
                                          ' repo ' + repo)
                        continue
                    # ensure no NoneType iteration errors
                    if s_dict_c[repo][tool] is None:
                        s_dict[repo][tool] = {}
                    # check if we need to configure instances along the way
                    instances = 1
                    if 'settings' in s_dict[repo][tool]:
                        if 'instances' in s_dict[repo][tool]['settings']:
                            instances = int(s_dict[repo][tool]
                                            ['settings']['instances'])
                    # add the tool
                    t_branch = 'master'
                    t_version = 'HEAD'
                    t_image = None
                    add_tools = None
                    add_tools = [(t_path_cased, '')]
                    if 'branch' in s_dict[repo][tool]:
                        t_branch = s_dict[repo][tool]['branch']
                    if 'version' in s_dict[repo][tool]:
                        t_version = s_dict[repo][tool]['version']
                    if 'image' in s_dict[repo][tool]:
                        t_image = s_dict[repo][tool]['image']
                    repository.add(
                        repo, tools=add_tools, branch=t_branch,
                        version=t_version, image_name=t_image)
                    manifest = Template(self.manifest)
                    # update the manifest with extra defined runtime settings
                    base_section = ':'.join([org, r_name, t_path,
                                             t_branch, t_version])
                    for option in extra_options:
                        if option in s_dict[repo][tool]:
                            opt_dict = manifest.option(base_section, option)
                            # add new values defined into default options for
                            # that tool, don't overwrite them
                            if opt_dict[0]:
                                opt_dict = json.loads(opt_dict[1])
                            else:
                                opt_dict = {}
                            # stringify values for vent
                            for v in s_dict[repo][tool][option]:
                                pval = s_dict[repo][tool][option][v]
                                s_dict[repo][tool][option][v] = json.dumps(
                                    pval)
                            opt_dict.update(s_dict[repo][tool][option])
                            manifest.set_option(base_section, option,
                                                json.dumps(opt_dict))
                    # copy manifest info into new sections if necessary
                    if instances > 1:
                        for i in range(2, instances + 1):
                            # instance sections get the instance number
                            # appended to the tool-path component
                            i_section = base_section.rsplit(':', 2)
                            i_section[0] += str(i)
                            i_section = ':'.join(i_section)
                            manifest.add_section(i_section)
                            for opt_val in manifest.section(base_section)[1]:
                                if opt_val[0] == 'name':
                                    manifest.set_option(i_section, opt_val[0],
                                                        opt_val[1] + str(i))
                                else:
                                    manifest.set_option(i_section, opt_val[0],
                                                        opt_val[1])
                    manifest.write_config()
            tool_d = {}
            tools = Tools()
            # start tools, if necessary
            for repo in s_dict:
                for tool in s_dict[repo]:
                    if 'start' in s_dict[repo][tool]:
                        if s_dict[repo][tool]['start']:
                            local_instances = 1
                            if 'settings' in s_dict[repo][tool] and 'instances' in s_dict[repo][tool]['settings']:
                                local_instances = int(
                                    s_dict[repo][tool]['settings']['instances'])
                            t_branch = 'master'
                            t_version = 'HEAD'
                            if 'branch' in s_dict[repo][tool]:
                                t_branch = s_dict[repo][tool]['branch']
                            if 'version' in s_dict[repo][tool]:
                                t_version = s_dict[repo][tool]['version']
                            for i in range(1, local_instances + 1):
                                # first instance keeps the bare tool name
                                i_name = tool + str(i) if i != 1 else tool
                                i_name = i_name.replace('@', '')
                                tool_d.update(
                                    tools._prep_start(repo, i_name)[1])
            if tool_d:
                tools.start(tool_d, None, is_tool_d=True)
        except Exception as e:  # pragma: no cover
            self.logger.error('Startup failed because: {0}'.format(str(e)))
            status = (False, str(e))
        return status

    def stop(self):
        """ Remove all containers labeled 'vent' (running or not) """
        status = (True, None)
        # remove containers
        try:
            c_list = set(self.d_client.containers.list(
                filters={'label': 'vent'}, all=True))
            for c in c_list:
                c.remove(force=True)
        except Exception as e:  # pragma: no cover
            status = (False, str(e))
        return status

    def get_configure(self,
                      repo=None,
                      name=None,
                      groups=None,
                      main_cfg=False):
        """
        Get the vent.template settings for a given tool by looking at the
        plugin_manifest
        """
        constraints = locals()
        del constraints['main_cfg']
        status = (True, None)
        template_dict = {}
        return_str = ''
        if main_cfg:
            # main_cfg: render vent.cfg itself rather than a tool template
            vent_cfg = Template(self.vent_config)
            for section in vent_cfg.sections()[1]:
                template_dict[section] = {}
                for vals in vent_cfg.section(section)[1]:
                    template_dict[section][vals[0]] = vals[1]
        else:
            # all possible vent.template options stored in plugin_manifest
            options = ['info', 'service', 'settings', 'docker', 'gpu']
            tools = Template(System().manifest).constrain_opts(
                constraints, options)[0]
            if tools:
                # should only be one tool
                tool = list(tools.keys())[0]
                # load all vent.template options into dict
                for section in tools[tool]:
                    template_dict[section] = json.loads(tools[tool][section])
            else:
                status = (False, "Couldn't get vent.template information")
        if status[0]:
            # display all those options as they would in the file
            for section in template_dict:
                return_str += '[' + section + ']\n'
                # ensure instances shows up in configuration
                for option in template_dict[section]:
                    # options starting with '#' are commented-out lines
                    if option.startswith('#'):
                        return_str += option + '\n'
                    else:
                        return_str += option + ' = '
                        return_str += template_dict[section][option] + '\n'
                return_str += '\n'
            # only one newline at end of file
            status = (True, return_str[:-1])
        return status

    def save_configure(self,
                       repo=None,
                       name=None,
                       groups=None,
                       config_val='',
                       from_registry=False,
                       main_cfg=False,
                       instances=1):
        """
        Save changes made to vent.template through npyscreen to the template
        and to plugin_manifest
        """
        def template_to_manifest(vent_template, manifest, tool, instances):
            """
            Helper function to transfer information from vent.template
            to plugin_manifest
            """
            sections = vent_template.sections()
            if sections[0]:
                for section in sections[1]:
                    section_dict = {}
                    if section == 'settings':
                        section_dict.update({'instances': str(instances)})
                    options = vent_template.options(section)
                    if options[0]:
                        for option in options[1]:
                            # 'name' in a template maps to 'link_name' in
                            # the manifest
                            option_name = option
                            if option == 'name':
                                option_name = 'link_name'
                            opt_val = vent_template.option(section, option)[1]
                            section_dict[option_name] = opt_val
                    if section_dict:
                        manifest.set_option(tool, section,
                                            json.dumps(section_dict))
                    elif manifest.option(tool, section)[0]:
                        manifest.del_option(tool, section)

        constraints = locals()
        del constraints['config_val']
        del constraints['from_registry']
        del constraints['main_cfg']
        del constraints['instances']
        del constraints['template_to_manifest']
        status = (True, None)
        fd = None
        # ensure instances is an int and remove instances from config_val to
        # ensure correct info
        instances = int(instances)
        config_val = re.sub(r'instances\ *=\ *\d+\n', '', config_val)
        api_system = System()
        manifest = api_system.manifest
        if not main_cfg:
            if not from_registry:
                # creating new instances
                if instances > 1:
                    fd, template_path = tempfile.mkstemp(suffix='.template')
                    # scrub name for clean section name
                    if re.search(r'\d+$', name):
                        name = re.sub(r'\d+$', '', name)
                    # NOTE(review): 'branch' and 'version' are not defined in
                    # this scope — this path will raise NameError; they likely
                    # should be parameters or read from the manifest. TODO
                    t_identifier = {'name': name,
                                    'branch': branch,
                                    'version': version}
                    result = Template(manifest).constrain_opts(
                        t_identifier, [])
                    tools = result[0]
                    tool = list(tools.keys())[0]
                else:
                    options = ['path', 'multi_tool', 'name']
                    tools, _ = Template(manifest).constrain_opts(
                        constraints, options)
                    # only one tool in tools because perform this function for
                    # every tool
                    if tools:
                        tool = list(tools.keys())[0]
                        if ('multi_tool' in tools[tool] and
                                tools[tool]['multi_tool'] == 'yes'):
                            name = tools[tool]['name']
                            if name == 'unspecified':
                                name = 'vent'
                            template_path = join(tools[tool]['path'],
                                                 name+'.template')
                        else:
                            template_path = join(tools[tool]['path'],
                                                 'vent.template')
                    else:
                        status = (False, "Couldn't save configuration")
            else:
                # registry tools have no checkout; write to a temp template
                fd, template_path = tempfile.mkstemp(suffix='.template')
                options = ['namespace']
                constraints.update({'type': 'registry'})
                tools, _ = Template(manifest).constrain_opts(constraints,
                                                             options)
                if tools:
                    tool = list(tools.keys())[0]
                else:
                    status = (False, "Couldn't save configuration")
            if status[0]:
                try:
                    # save in vent.template
                    with open(template_path, 'w') as f:
                        f.write(config_val)
                    # save in plugin_manifest
                    vent_template = Template(template_path)
                    manifest = Template(manifest)
                    if instances > 1:
                        # add instances as needed
                        for i in range(1, instances + 1):
                            i_section = tool.rsplit(':', 2)
                            i_section[0] += str(i) if i != 1 else ''
                            i_section = ':'.join(i_section)
                            if not manifest.section(i_section)[0]:
                                # new instance section: copy from the base
                                # tool, adjusting per-instance fields
                                manifest.add_section(i_section)
                                for val_pair in manifest.section(tool)[1]:
                                    name = val_pair[0]
                                    val = val_pair[1]
                                    if name == 'name':
                                        val += str(i)
                                    elif name == 'last_updated':
                                        val = Timestamp()
                                    elif name == 'running':
                                        val = 'no'
                                    manifest.set_option(i_section, name, val)
                                template_to_manifest(vent_template, manifest,
                                                     i_section, instances)
                            else:
                                # existing section: just refresh the
                                # instances count in its settings
                                settings = manifest.option(i_section,
                                                           'settings')
                                if settings[0]:
                                    settings_dict = json.loads(settings[1])
                                    settings_dict['instances'] = str(instances)
                                    manifest.set_option(i_section, 'settings',
                                                        json.dumps(
                                                            settings_dict))
                                else:
                                    inst = str(instances)
                                    settings_dict = {'instances': inst}
                                    manifest.set_option(i_section, 'settings',
                                                        json.dumps(
                                                            settings_dict))
                    else:
                        try:
                            settings_str = manifest.option(tool,
                                                           'settings')[1]
                            settings_dict = json.loads(settings_str)
                            old_instances = int(settings_dict['instances'])
                        except Exception:
                            old_instances = 1
                        template_to_manifest(vent_template, manifest,
                                             tool, old_instances)
                    manifest.write_config()
                    status = (True, manifest)
                except Exception as e:  # pragma: no cover
                    self.logger.error('save_configure error: ' + str(e))
                    status = (False, str(e))
            # close os file handle and remove temp file
            if from_registry or instances > 1:
                try:
                    close(fd)
                    remove(template_path)
                except Exception as e:  # pragma: no cover
                    self.logger.error('save_configure error: ' + str(e))
        else:
            # main_cfg: write straight to vent.cfg
            with open(self.vent_config, 'w') as f:
                f.write(config_val)
        return status

    def restart_tools(self,
                      repo=None,
                      name=None,
                      groups=None,
                      branch='master',
                      version='HEAD',
                      main_cfg=False,
                      old_val='',
                      new_val=''):
        """
        Restart necessary tools based on changes that have been made either to
        vent.cfg or to vent.template. This includes tools that need to be
        restarted because they depend on other tools that were changed.
        """
        status = (True, None)
        if not main_cfg:
            try:
                t_identifier = {'name': name,
                                'branch': branch,
                                'version': version}
                result = Template(System().manifest).constrain_opts(
                    t_identifier, ['running', 'link_name'])
                tools = result[0]
                tool = list(tools.keys())[0]
                # only restart tools that are currently running
                if ('running' in tools[tool] and
                        tools[tool]['running'] == 'yes'):
                    start_tools = [t_identifier]
                    dependent_tools = [tools[tool]['link_name']]
                    start_tools += Dependencies(dependent_tools)
                    # TODO
                    start_d = {}
                    for tool_identifier in start_tools:
                        self.clean(**tool_identifier)
                        start_d.update(
                            self.prep_start(**tool_identifier)[1])
                    if start_d:
                        Tools().start(start_d, '', is_tool_d=True)
            except Exception as e:  # pragma: no cover
                self.logger.error('Trouble restarting tool ' + name +
                                  ' because: ' + str(e))
                status = (False, str(e))
        else:
            try:
                # string manipulation to get tools into arrays
                # (+ 20 skips the 19-char '[external-services]' header and
                # its trailing newline)
                ext_start = old_val.find('[external-services]')
                if ext_start >= 0:
                    ot_str = old_val[old_val.find('[external-services]') +
                                     20:]
                else:
                    ot_str = ''
                old_tools = []
                for old_tool in ot_str.split('\n'):
                    if old_tool != '':
                        old_tools.append(old_tool.split('=')[0].strip())
                ext_start = new_val.find('[external-services]')
                if ext_start >= 0:
                    nt_str = new_val[new_val.find('[external-services]') +
                                     20:]
                else:
                    nt_str = ''
                new_tools = []
                for new_tool in nt_str.split('\n'):
                    if new_tool != '':
                        new_tools.append(new_tool.split('=')[0].strip())
                # find tools changed
                tool_changes = []
                for old_tool in old_tools:
                    if old_tool not in new_tools:
                        tool_changes.append(old_tool)
                for new_tool in new_tools:
                    if new_tool not in old_tools:
                        tool_changes.append(new_tool)
                    else:
                        # tool name will be the same
                        oconf = old_val[old_val.find(
                            new_tool):].split('\n')[0]
                        nconf = new_val[new_val.find(
                            new_tool):].split('\n')[0]
                        if oconf != nconf:
                            tool_changes.append(new_tool)
                # put link names in a dictionary for finding dependencies
                dependent_tools = []
                for i, entry in enumerate(tool_changes):
                    dependent_tools.append(entry)
                    # change names to lowercase for use in clean, prep_start
                    tool_changes[i] = {'name': entry.lower().replace('-',
                                                                     '_')}
                dependencies = Dependencies(dependent_tools)
                # restart tools
                restart = tool_changes + dependencies
                tool_d = {}
                for tool in restart:
                    self.clean(**tool)
                    tool_d.update(self.prep_start(**tool)[1])
                if tool_d:
                    # TODO fix the arguments
                    Tools().start(tool_d)
            except Exception as e:  # pragma: no cover
                self.logger.error('Problem restarting tools: ' + str(e))
                status = (False, str(e))
        return status

    def upgrade(self):
        ''' Upgrades Vent itself, and core containers '''
        # TODO
        return
def file_queue(path, template_path='/vent/', r_host='redis'):
    """
    Processes files that have been added from the rq-worker, starts plugins
    that match the mime type for the new file.

    Args:
        path: queue job path; assumed to be '<prefix>_<file path>' —
            everything before the first '_' is stripped (TODO confirm the
            producer always prepends a prefix)
        template_path: directory containing vent.cfg and plugin_manifest.cfg
        r_host: redis hostname for the gpu queue and plugin counters

    Returns:
        (bool, result) tuple: (True, images started) on success, or
        (False, failed images or error string) on failure.
    """
    import configparser
    import ast
    import docker
    import json
    import requests
    import os
    import sys
    import time
    import uuid
    from redis import Redis
    from redis import StrictRedis
    from rq import Queue
    from subprocess import check_output, Popen, PIPE
    from string import punctuation
    from vent.helpers.logs import Logger
    status = (True, None)
    images = []
    configs = {}
    logger = Logger(__name__)
    # prefer the host-mounted vent configuration when it is present
    if (os.path.isfile('/root/.vent/vent.cfg') and
            os.path.isfile('/root/.vent/plugin_manifest.cfg')):
        template_path = '/root/.vent/'
    try:
        d_client = docker.from_env()
        # get the correct path for binding
        vent_config = configparser.ConfigParser(interpolation=None)
        vent_config.optionxform = str
        vent_config.read(template_path + 'vent.cfg')
        if (vent_config.has_section('main') and
                vent_config.has_option('main', 'files')):
            files = vent_config.get('main', 'files')
        else:
            files = '/'
        # deal with ~
        files = os.path.expanduser(files)
        # punctuation other than '/', '_', '-' gets backslash-escaped below
        chars = set(punctuation)
        chars.discard('/')
        chars.discard('_')
        chars.discard('-')
        file_name = ''
        # escape any funky symbols to allow users FREEDOM of directory name
        for char in files:
            if char in chars:
                if char == '\\':
                    file_name += '\\' + char
                else:
                    file_name += '\\\\' + char
            else:
                file_name += char
        files = file_name
        # drop the queue prefix and rebase '/files' onto the configured dir
        _, path = path.split('_', 1)
        directory = path.rsplit('/', 1)[0]
        path = path.replace('/files', files, 1)
        path_copy = path
        # read in configuration of plugins to get the ones that should run
        # against the path.
        # keep track of images that failed getting configurations for
        failed_images = set()
        config = configparser.ConfigParser(interpolation=None)
        config.optionxform = str
        logger.debug('Path to manifest: ' +
                     template_path + 'plugin_manifest.cfg')
        config.read(template_path + 'plugin_manifest.cfg')
        sections = config.sections()
        name_maps = {}
        orig_path_d = {}
        path_cmd = {}
        labels_d = {}
        # get all name maps
        for section in sections:
            link_name = config.get(section, 'link_name')
            image_name = config.get(section, 'image_name')
            # container names use '-' in place of ':' and '/'
            name_maps[link_name] = image_name.replace(':', '-').replace(
                '/', '-')
        for section in sections:
            path = path_copy
            orig_path = ''
            repo = config.get(section, 'repo')
            t_type = config.get(section, 'type')
            labels = {
                'vent-plugin': '',
                'file': path,
                'vent.section': section,
                'vent.repo': repo,
                'vent.type': t_type
            }
            image_name = config.get(section, 'image_name')
            link_name = config.get(section, 'link_name')
            if config.has_option(section, 'service'):
                try:
                    options_dict = json.loads(config.get(section, 'service'))
                    for option in options_dict:
                        value = options_dict[option]
                        labels[option] = value
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if config.has_option(section, 'settings'):
                try:
                    options_dict = json.loads(config.get(section, 'settings'))
                    in_base = directory == '/files'
                    # process base by default
                    process_file = in_base
                    # check if this tool shouldn't process the base by default
                    if 'process_base' in options_dict:
                        if options_dict['process_base'] == 'no':
                            process_file = False
                    # check if this tool should look at subdirs created by
                    # other tools' output
                    if 'process_from_tool' in options_dict and not in_base:
                        for tool in options_dict['process_from_tool'].split(
                                ','):
                            dir_pieces = directory.split('/')
                            dir_check = dir_pieces
                            # output dirs contain a 'UTC' timestamp component
                            for dir_piece in dir_pieces:
                                if 'UTC' in dir_piece:
                                    dir_check = dir_piece
                            if tool.replace(' ', '-') in dir_check:
                                process_file = True
                    # queue the image when the file extension matches
                    if 'ext_types' in options_dict and process_file:
                        ext_types = options_dict['ext_types'].split(',')
                        for ext_type in ext_types:
                            if path.endswith(ext_type):
                                images.append(image_name)
                                configs[image_name] = {}
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if image_name in configs:
                if config.has_option(section, 'docker'):
                    try:
                        options_dict = ast.literal_eval(
                            config.get(section, 'docker'))
                        for option in options_dict:
                            try:
                                configs[image_name][option] = ast.literal_eval(
                                    options_dict[option])
                            except Exception as e:  # pragma: no cover
                                # not a python literal; keep the raw string
                                configs[image_name][option] = options_dict[
                                    option]
                        if 'links' in configs[image_name]:
                            # rewrite link names to container names
                            for link in configs[image_name]['links']:
                                if link in name_maps:
                                    configs[image_name]['links'][
                                        name_maps[link]] = configs[image_name][
                                        'links'].pop(link)
                        # TODO volumes_from
                        # TODO external services
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'gpu') and image_name in configs:
                try:
                    options_dict = json.loads(config.get(section, 'gpu'))
                    if 'enabled' in options_dict:
                        enabled = options_dict['enabled']
                        if enabled == 'yes':
                            configs[image_name]['gpu_options'] = options_dict
                            labels['vent.gpu'] = 'yes'
                            if 'dedicated' in options_dict:
                                labels['vent.gpu.dedicated'] = options_dict[
                                    'dedicated']
                            if 'device' in options_dict:
                                labels['vent.gpu.device'] = options_dict[
                                    'device']
                            if 'mem_mb' in options_dict:
                                labels['vent.gpu.mem_mb'] = options_dict[
                                    'mem_mb']
                            port = ''
                            host = ''
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option(
                                        'nvidia-docker-plugin', 'port')):
                                port = vent_config.get('nvidia-docker-plugin',
                                                       'port')
                            else:
                                port = '3476'
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option(
                                        'nvidia-docker-plugin', 'host')):
                                host = vent_config.get('nvidia-docker-plugin',
                                                       'host')
                            else:
                                # grab the default gateway
                                try:
                                    route = Popen(('/sbin/ip', 'route'),
                                                  stdout=PIPE)
                                    host = check_output(
                                        ('awk', '/default/ {print$3}'),
                                        stdin=route.stdout).strip().decode(
                                        'utf-8')
                                    route.wait()
                                except Exception as e:  # pragma no cover
                                    logger.error('Default gateway '
                                                 'went wrong ' + str(e))
                            # ask nvidia-docker-plugin for the cli options
                            # needed to expose the gpu to the container
                            nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                            params = {'vol': 'nvidia_driver'}
                            try:
                                r = requests.get(nd_url, params=params)
                                if r.status_code == 200:
                                    options = r.text.split()
                                    for option in options:
                                        if option.startswith(
                                                '--volume-driver='):
                                            configs[image_name][
                                                'volume_driver'] = option.split(
                                                '=', 1)[1]
                                        elif option.startswith('--volume='):
                                            vol = option.split('=',
                                                               1)[1].split(':')
                                            if 'volumes' in configs[
                                                    image_name]:
                                                # !! TODO handle if volumes is a list
                                                configs[image_name]['volumes'][
                                                    vol[0]] = {
                                                    'bind': vol[1],
                                                    'mode': vol[2]
                                                }
                                            else:
                                                configs[image_name][
                                                    'volumes'] = {
                                                    vol[0]: {
                                                        'bind': vol[1],
                                                        'mode': vol[2]
                                                    }
                                                }
                                        elif option.startswith('--device='):
                                            dev = option.split('=', 1)[1]
                                            if 'devices' in configs[
                                                    image_name]:
                                                configs[image_name][
                                                    'devices'].append(dev +
                                                                      ':' +
                                                                      dev +
                                                                      ':rwm')
                                            else:
                                                configs[image_name][
                                                    'devices'] = [
                                                    dev + ':' + dev + ':rwm'
                                                ]
                                        else:
                                            # unable to parse option provided by
                                            # nvidia-docker-plugin
                                            pass
                            except Exception as e:  # pragma: no cover
                                failed_images.add(image_name)
                                status = (False, str(e))
                                logger.error(
                                    'Failure with nvidia-docker-plugin: ' +
                                    str(e))
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
                    logger.error('Unable to process gpu options: ' + str(e))
            path_cmd[image_name] = path
            orig_path_d[image_name] = orig_path
            labels_d[image_name] = labels
        # TODO get syslog address rather than hardcode
        # TODO add group label
        # TODO get group and name for syslog tag
        log_config = {
            'type': 'syslog',
            'config': {
                'syslog-address': 'tcp://127.0.0.1:514',
                'syslog-facility': 'daemon',
                'tag': '{{.Name}}'
            }
        }
        # setup gpu queue
        can_queue_gpu = True
        try:
            q = Queue(connection=Redis(host=r_host), default_timeout=86400)
            r = StrictRedis(host=r_host, port=6379, db=0)
        except Exception as e:  # pragma: no cover
            can_queue_gpu = False
            logger.error('Unable to connect to redis: ' + str(e))
        # start containers
        for image in images:
            if image not in failed_images:
                orig_path = orig_path_d[image]
                labels = labels_d[image]
                configs[image]['auto_remove'] = True
                # unique container name: image + epoch + short uuid
                name = image.replace('/', '-').replace(':', '-') + '_' + \
                    str(int(time.time()))+'_'+str(uuid.uuid4())[:4]
                if orig_path:
                    # replay_pcap is special so we can't bind it like normal
                    # since the plugin takes in an additional argument
                    dir_path = orig_path.rsplit('/', 1)[0]
                else:
                    dir_path = path.rsplit('/', 1)[0]
                volumes = {dir_path: {'bind': dir_path, 'mode': 'rw'}}
                if 'volumes' in configs[image]:
                    for volume in volumes:
                        configs[image]['volumes'][volume] = volumes[volume]
                else:
                    configs[image]['volumes'] = volumes
                command = path_cmd[image]
                if 'command' in configs[image]:
                    command = configs[image]['command'] + ' ' + command
                    del configs[image]['command']
                if 'vent.gpu' in labels and labels['vent.gpu'] == 'yes':
                    if can_queue_gpu:
                        # queue up containers requiring a gpu
                        q_str = json.dumps({
                            'image': image,
                            'command': command,
                            'labels': labels,
                            'detach': True,
                            'name': name,
                            'network': 'vent',
                            'log_config': log_config,
                            'configs': configs[image]
                        })
                        q.enqueue('watch.gpu_queue', q_str, ttl=2592000)
                    else:
                        failed_images.add(image)
                else:
                    if 'gpu_options' in configs[image]:
                        del configs[image]['gpu_options']
                    # TODO check for links
                    change_networking = False
                    links = []
                    network_name = ''
                    configs[image]['network'] = 'vent'
                    if 'links' in configs[image]:
                        # docker-py can't create with links on a user-defined
                        # network; create first, then connect with links
                        for link in configs[image]['links']:
                            links.append((link, configs[image]['links'][link]))
                        if 'network' in configs[image]:
                            network_name = configs[image]['network']
                            del configs[image]['network']
                        del configs[image]['links']
                        change_networking = True
                    cont = d_client.containers.create(image=image,
                                                      command=command,
                                                      labels=labels,
                                                      detach=True,
                                                      name=name,
                                                      log_config=log_config,
                                                      **configs[image])
                    cont_id = cont.id
                    if change_networking:
                        network_to_attach = d_client.networks.list(
                            names=[network_name])
                        if len(network_to_attach) > 0:
                            logger.info(
                                'Attaching to network: "{0}" with the following links: {1}'
                                .format(network_name, links))
                            network_to_attach[0].connect(cont_id, links=links)
                            logger.info('Detaching from network: bridge')
                            network_to_detach = d_client.networks.list(
                                names=['bridge'])
                            network_to_detach[0].disconnect(cont_id)
                    cont.start()
                    try:
                        # bump the per-plugin run counter (increments by 1)
                        r.hincrby('vent_plugin_counts', image)
                    except Exception as e:  # pragma: no cover
                        logger.error(
                            'Failed to update count of plugins because: {0}'.
                            format(str(e)))
        if failed_images:
            status = (False, failed_images)
        else:
            status = (True, images)
    except Exception as e:  # pragma: no cover
        status = (False, str(e))
        logger.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        logger.error('Failed to process job: ' + str(e))
    logger.info(str(status))
    return status
class Image: def __init__(self, manifest): self.manifest = manifest self.d_client = docker.from_env() self.logger = Logger(__name__) def add(self, image, link_name, tag=None, registry=None, groups=None): status = (True, None) try: pull_name = image org = '' name = image if '/' in image: org, name = image.split('/') else: org = 'official' if not tag: tag = 'latest' if not registry: registry = 'docker.io' if not link_name: link_name = name if not groups: groups = '' full_image = registry + '/' + image + ':' + tag image = self.d_client.images.pull(full_image) section = ':'.join([registry, org, name, '', tag]) namespace = org + '/' + name # set template section and options for tool at version and branch template = Template(template=self.manifest) template.add_section(section) template.set_option(section, 'name', name) template.set_option(section, 'pull_name', pull_name) template.set_option(section, 'namespace', namespace) template.set_option(section, 'path', '') template.set_option(section, 'repo', registry + '/' + org) template.set_option(section, 'branch', '') template.set_option(section, 'version', tag) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') template.set_option(section, 'image_name', image.attrs['RepoTags'][0]) template.set_option(section, 'type', 'registry') template.set_option(section, 'link_name', link_name) template.set_option(section, 'commit_id', '') template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image.attrs['Id'].split(':')[1][:12]) template.set_option(section, 'groups', groups) # write out configuration to the manifest file template.write_config() status = (True, 'Successfully added ' + full_image) except Exception as e: # pragma: no cover self.logger.error('Failed to add image because: {0}'.format( str(e))) status = (False, str(e)) return status def update(self, image): # TODO return
def __init__(self, es_host="elasticsearch", rmq_host="rabbitmq"): """ initialize host information """ self.es_host = es_host self.rmq_host = rmq_host self.logger = Logger(__name__)
class Tools: def __init__(self, version='HEAD', branch='master', user=None, pw=None, *args, **kwargs): self.version = version self.branch = branch self.user = user self.pw = pw self.d_client = docker.from_env() self.path_dirs = PathDirs(**kwargs) self.path_dirs.host_config() self.manifest = join(self.path_dirs.meta_dir, 'plugin_manifest.cfg') self.logger = Logger(__name__) def new(self, tool_type, uri, tools=None, link_name=None, image_name=None, overrides=None, tag=None, registry=None, groups=None): try: # remove tools that are already installed from being added if isinstance(tools, list): i = len(tools) - 1 while i >= 0: tool = tools[i] if tool[0].find('@') >= 0: tool_name = tool[0].split('@')[-1] else: tool_name = tool[0].rsplit('/', 1)[-1] constraints = { 'name': tool_name, 'repo': uri.split('.git')[0] } prev_installed, _ = Template( template=self.manifest).constrain_opts( constraints, []) # don't reinstall if prev_installed: tools.remove(tool) i -= 1 if len(tools) == 0: tools = None except Exception as e: # pragma: no cover self.logger.error('Add failed with error: {0}'.format(str(e))) return (False, str(e)) if tool_type == 'image': status = Image(self.manifest).add(uri, link_name, tag=tag, registry=registry, groups=groups) else: if tool_type == 'core': uri = 'https://github.com/cyberreboot/vent' core = True else: core = False status = Repository(self.manifest).add(uri, tools, overrides=overrides, version=self.version, image_name=image_name, branch=self.branch, user=self.user, pw=self.pw, core=core) return status def configure(self, tool): # TODO return def inventory(self, choices=None): """ Return a dictionary of the inventory items and status """ status = (True, None) if not choices: return (False, 'No choices made') try: # choices: repos, tools, images, built, running, enabled items = { 'repos': [], 'tools': {}, 'images': {}, 'built': {}, 'running': {}, 'enabled': {} } tools = Template(self.manifest).list_tools() for choice in choices: for tool in tools: 
try: if choice == 'repos': if 'repo' in tool: if (tool['repo'] and tool['repo'] not in items[choice]): items[choice].append(tool['repo']) elif choice == 'tools': items[choice][tool['section']] = tool['name'] elif choice == 'images': # TODO also check against docker items[choice][tool['section']] = tool['image_name'] elif choice == 'built': items[choice][tool['section']] = tool['built'] elif choice == 'running': containers = Containers() status = 'not running' for container in containers: image_name = tool['image_name'] \ .rsplit(':' + tool['version'], 1)[0] image_name = image_name.replace(':', '-') image_name = image_name.replace('/', '-') self.logger.info('image_name: ' + image_name) if container[0] == image_name: status = container[1] elif container[0] == image_name + \ '-' + tool['version']: status = container[1] items[choice][tool['section']] = status elif choice == 'enabled': items[choice][tool['section']] = tool['enabled'] else: # unknown choice pass except Exception as e: # pragma: no cover self.logger.error('Unable to grab info about tool: ' + str(tool) + ' because: ' + str(e)) status = (True, items) except Exception as e: # pragma: no cover self.logger.error('Inventory failed with error: {0}'.format( str(e))) status = (False, str(e)) return status def remove(self, repo, name): args = locals() status = (True, None) # get resulting dict of sections with options that match constraints template = Template(template=self.manifest) results, _ = template.constrain_opts(args, []) for result in results: response, image_name = template.option(result, 'image_name') name = template.option(result, 'name')[1] try: settings_dict = json.loads( template.option(result, 'settings')[1]) instances = int(settings_dict['instances']) except Exception: instances = 1 try: # check for container and remove c_name = image_name.replace(':', '-').replace('/', '-') for i in range(1, instances + 1): container_name = c_name + str(i) if i != 1 else c_name container = 
self.d_client.containers.get(container_name) response = container.remove(v=True, force=True) self.logger.info( 'Removing container: {0}'.format(container_name)) except Exception as e: # pragma: no cover self.logger.warning('Unable to remove the container: ' + container_name + ' because: ' + str(e)) # check for image and remove try: response = None image_id = template.option(result, 'image_id')[1] response = self.d_client.images.remove(image_id, force=True) self.logger.info('Removing image: ' + image_name) except Exception as e: # pragma: no cover self.logger.warning('Unable to remove the image: ' + image_name + ' because: ' + str(e)) # remove tool from the manifest for i in range(1, instances + 1): res = result.rsplit(':', 2) res[0] += str(i) if i != 1 else '' res = ':'.join(res) if template.section(res)[0]: status = template.del_section(res) self.logger.info('Removing tool: ' + res) # TODO if all tools from a repo have been removed, remove the repo template.write_config() return status def start(self, repo, name, is_tool_d=False): if is_tool_d: tool_d = repo else: args = locals() del args['self'] del args['is_tool_d'] tool_d = {} tool_d.update(self._prep_start(**args)[1]) status = (True, None) try: # check start priorities (priority of groups alphabetical for now) group_orders = {} groups = [] containers_remaining = [] username = getpass.getuser() # remove tools that have the hidden label tool_d_copy = copy.deepcopy(tool_d) for container in tool_d_copy: if 'labels' in tool_d_copy[ container] and 'vent.groups' in tool_d_copy[container][ 'labels']: groups_copy = tool_d_copy[container]['labels'][ 'vent.groups'].split(',') if 'hidden' in groups_copy: del tool_d[container] for container in tool_d: containers_remaining.append(container) self.logger.info("User: '******' starting container: {1}".format( username, container)) if 'labels' in tool_d[container]: if 'vent.groups' in tool_d[container]['labels']: groups += tool_d[container]['labels'][ 'vent.groups'].split(',') 
                        # NOTE(review): the lines down to the first `def` below are the
                        # tail of a method whose definition begins earlier in the file.
                        if 'vent.priority' in tool_d[container]['labels']:
                            # priorities/groups are parallel comma-separated lists;
                            # index i of one corresponds to index i of the other
                            priorities = tool_d[container]['labels'][
                                'vent.priority'].split(',')
                            container_groups = tool_d[container]['labels'][
                                'vent.groups'].split(',')
                            for i, priority in enumerate(priorities):
                                if container_groups[i] not in group_orders:
                                    group_orders[container_groups[i]] = []
                                group_orders[container_groups[i]].append(
                                    (int(priority), container))
                            # prioritized containers are started by group order,
                            # so they leave the "remaining" pool
                            containers_remaining.remove(container)
                    tool_d[container]['labels'].update(
                        {'started-by': username})
                else:
                    tool_d[container].update(
                        {'labels': {'started-by': username}})
            # start containers based on priorities
            p_results = self._start_priority_containers(
                groups, group_orders, tool_d)
            # start the rest of the containers that didn't have any priorities
            r_results = self._start_remaining_containers(
                containers_remaining, tool_d)
            # results = (started containers, failed containers)
            results = (p_results[0] + r_results[0],
                       p_results[1] + r_results[1])
            if len(results[1]) > 0:
                status = (False, results)
            else:
                status = (True, results)
        except Exception as e:  # pragma: no cover
            self.logger.error('Start failed with error: {0}'.format(str(e)))
            status = (False, str(e))
        return status

    def _prep_start(self, repo, name):
        """
        Build the docker run options for the manifest sections matching
        repo/name, resolving links, volumes_from and network_mode between
        tools.

        Returns a (bool, tool_d) status tuple where tool_d maps container
        name -> kwargs for docker run.
        """
        args = locals()
        status = (True, None)
        try:
            options = ['name', 'namespace', 'built', 'groups',
                       'path', 'image_name', 'branch', 'repo',
                       'type', 'version']
            vent_config = Template(template=self.path_dirs.cfg_file)
            manifest = Template(self.manifest)
            files = vent_config.option('main', 'files')
            files = (files[0], expanduser(files[1]))
            s, _ = manifest.constrain_opts(args, options)
            status, tool_d = self._start_sections(s, files)
            # look out for links to delete because they're defined externally
            links_to_delete = set()
            # get instances for each tool
            tool_instances = {}
            sections = manifest.sections()[1]
            for section in sections:
                settings = manifest.option(section, 'settings')
                if settings[0]:
                    settings = json.loads(settings[1])
                    if 'instances' in settings:
                        l_name = manifest.option(section, 'link_name')
                        if l_name[0]:
                            tool_instances[l_name[1]] = int(
                                settings['instances'])
            # check and update links, volumes_from, network_mode
            for container in list(tool_d.keys()):
                # non-core containers are marked for auto-removal on exit
                if 'labels' not in tool_d[
                        container] or 'vent.groups' not in tool_d[container][
                            'labels'] or 'core' not in tool_d[container][
                                'labels']['vent.groups']:
                    tool_d[container]['remove'] = True
                if 'links' in tool_d[container]:
                    for link in list(tool_d[container]['links'].keys()):
                        # add links to external services already running if
                        # necessary, by default configure local services too
                        configure_local = True
                        ext = 'external-services'
                        if link in vent_config.options(ext)[1]:
                            try:
                                lconf = json.loads(
                                    vent_config.option(ext, link)[1])
                                if ('locally_active' not in lconf or
                                        lconf['locally_active'] == 'no'):
                                    ip_adr = lconf['ip_address']
                                    port = lconf['port']
                                    tool_d[container]['extra_hosts'] = {}
                                    # containers use lowercase names for
                                    # connections
                                    tool_d[container]['extra_hosts'][
                                        link.lower()] = ip_adr
                                    # create an environment variable for container
                                    # to access port later
                                    env_variable = link.upper() + \
                                        '_CUSTOM_PORT=' + port
                                    if 'environment' not in tool_d[container]:
                                        tool_d[container]['environment'] = []
                                    tool_d[container]['environment'].append(
                                        env_variable)
                                    # remove the entry from links because no
                                    # longer connecting to local container
                                    links_to_delete.add(link)
                                    configure_local = False
                            except Exception as e:  # pragma: no cover
                                self.logger.error(
                                    'Could not load external settings because: {0}'
                                    .format(str(e)))
                                configure_local = True
                                # NOTE(review): sets status to a bare bool here while
                                # it is a tuple elsewhere; only its truthiness is
                                # used below, so this works but is fragile
                                status = False
                        if configure_local:
                            # swap the temporary link name for the real
                            # container name (plus numbered instances)
                            for c in list(tool_d.keys()):
                                if ('tmp_name' in tool_d[c] and
                                        tool_d[c]['tmp_name'] == link):
                                    tool_d[container]['links'][
                                        tool_d[c]['name']] = tool_d[container][
                                            'links'].pop(link)
                                    if link in tool_instances and tool_instances[
                                            link] > 1:
                                        for i in range(
                                                2, tool_instances[link] + 1):
                                            tool_d[container]['links'][
                                                tool_d[c]['name'] +
                                                str(i)] = tool_d[container][
                                                    'links'][tool_d[c]
                                                             ['name']] + str(i)
                if 'volumes_from' in tool_d[container]:
                    # translate tmp names in volumes_from to real names
                    tmp_volumes_from = tool_d[container]['volumes_from']
                    tool_d[container]['volumes_from'] = []
                    for volumes_from in list(tmp_volumes_from):
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and
                                    tool_d[c]['tmp_name'] == volumes_from):
                                tool_d[container]['volumes_from'].append(
                                    tool_d[c]['name'])
                                tmp_volumes_from.remove(volumes_from)
                    tool_d[container]['volumes_from'] += tmp_volumes_from
                if 'network_mode' in tool_d[container]:
                    # translate "container:<tmp_name>" to the real name
                    if tool_d[container]['network_mode'].startswith(
                            'container:'):
                        network_c_name = tool_d[container][
                            'network_mode'].split('container:')[1]
                        for c in list(tool_d.keys()):
                            if ('tmp_name' in tool_d[c] and
                                    tool_d[c]['tmp_name'] == network_c_name):
                                tool_d[container]['network_mode'] = 'container:' + \
                                    tool_d[c]['name']
            # remove tmp_names
            for c in list(tool_d.keys()):
                if 'tmp_name' in tool_d[c]:
                    del tool_d[c]['tmp_name']
            # remove links section if all were externally configured
            for c in list(tool_d.keys()):
                if 'links' in tool_d[c]:
                    for link in links_to_delete:
                        if link in tool_d[c]['links']:
                            del tool_d[c]['links'][link]
                    # delete links if no more defined
                    if not tool_d[c]['links']:
                        del tool_d[c]['links']
            # remove containers that shouldn't be started
            for c in list(tool_d.keys()):
                deleted = False
                if 'start' in tool_d[c] and not tool_d[c]['start']:
                    del tool_d[c]
                    deleted = True
                if not deleted:
                    # look for tools services that are being done externally
                    # tools are capitalized in vent.cfg, so make them lowercase
                    # for comparison
                    ext = 'external-services'
                    external_tools = vent_config.section(ext)[1]
                    name = tool_d[c]['labels']['vent.name']
                    for tool in external_tools:
                        if name == tool[0].lower():
                            try:
                                tool_config = json.loads(tool[1])
                                if ('locally_active' in tool_config and
                                        tool_config['locally_active'] == 'no'):
                                    del tool_d[c]
                            except Exception as e:  # pragma: no cover
                                self.logger.warning(
                                    'Locally running container ' + name +
                                    ' may be redundant')
            if status:
                status = (True, tool_d)
            else:
                status = (False, tool_d)
        except Exception as e:  # pragma: no cover
            self.logger.error('_prep_start failed with error: ' + str(e))
            status = (False, e)
        return status

    def _start_sections(self, s, files):
        """
        Build the per-container docker options dict (tool_d) from constrained
        manifest sections `s`; `files` is the (bool, path) 'files' option from
        vent.cfg.  Returns (status, tool_d).
        """
        tool_d = {}
        status = (True, None)
        for section in s:
            # initialize needed vars
            c_name = s[section]['image_name'].replace(':', '-')
            c_name = c_name.replace('/', '-')
            instance_num = re.search(r'\d+$', s[section]['name'])
            if instance_num:
                c_name += instance_num.group()
            image_name = s[section]['image_name']
            # checkout the right version and branch of the repo
            tool_d[c_name] = {'image': image_name, 'name': c_name}
            # get rid of all commented sections in various runtime
            # configurations
            manifest = Template(self.manifest)
            overall_dict = {}
            for setting in ['info', 'docker', 'gpu', 'settings', 'service']:
                option = manifest.option(section, setting)
                if option[0]:
                    overall_dict[setting] = {}
                    settings_dict = json.loads(option[1])
                    for opt in settings_dict:
                        if not opt.startswith('#'):
                            overall_dict[setting][opt] = settings_dict[opt]
            if 'docker' in overall_dict:
                options_dict = overall_dict['docker']
                for option in options_dict:
                    options = options_dict[option]
                    # check for commands to evaluate
                    if '`' in options:
                        cmds = options.split('`')
                        if len(cmds) > 2:
                            i = 1
                            while i < len(cmds):
                                try:
                                    cmds[i] = check_output(
                                        shlex.split(cmds[i]),
                                        stderr=STDOUT,
                                        close_fds=True).strip().decode('utf-8')
                                except Exception as e:  # pragma: no cover
                                    self.logger.error(
                                        'unable to evaluate command specified in vent.template: ' + str(e))
                                i += 2
                        options = ''.join(cmds)
                    # check for commands to evaluate
                    # store options set for docker
                    try:
                        tool_d[c_name][option] = ast.literal_eval(options)
                    except Exception as e:  # pragma: no cover
                        self.logger.debug('Unable to literal_eval: {0}'.format(
                            str(options)))
                        tool_d[c_name][option] = options
            if 'labels' not in tool_d[c_name]:
                tool_d[c_name]['labels'] = {}
            # get the service uri info
            if 'service' in overall_dict:
                try:
                    options_dict = overall_dict['service']
                    for option in options_dict:
                        tool_d[c_name]['labels'][option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to '
                                      'store service options for '
                                      'docker: ' + str(e))
            # check for gpu settings
            if 'gpu' in overall_dict:
                try:
                    # NOTE(review): json.loads(status[1]) parses the previous
                    # status value, not the gpu settings — this almost certainly
                    # should be overall_dict['gpu'] (cf. the 'service' branch
                    # above); as written it raises, is caught below, and the
                    # gpu labels are never set.  TODO confirm and fix.
                    options_dict = json.loads(status[1])
                    for option in options_dict:
                        tool_d[c_name]['labels']['gpu.' +
                                                 option] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store gpu options for '
                                      'docker: ' + str(e))
            # get temporary name for links, etc.
            # NOTE(review): `status` is reused here as the link_name option
            # tuple and is what ultimately gets returned from this method
            plugin_c = Template(template=self.manifest)
            status, plugin_sections = plugin_c.sections()
            for plugin_section in plugin_sections:
                status = plugin_c.option(plugin_section, 'link_name')
                image_status = plugin_c.option(plugin_section, 'image_name')
                if status[0] and image_status[0]:
                    cont_name = image_status[1].replace(':', '-')
                    cont_name = cont_name.replace('/', '-')
                    if cont_name not in tool_d:
                        tool_d[cont_name] = {
                            'image': image_status[1],
                            'name': cont_name,
                            'start': False
                        }
                    tool_d[cont_name]['tmp_name'] = status[1]
            # add extra labels
            tool_d[c_name]['labels']['vent'] = Version()
            tool_d[c_name]['labels']['vent.namespace'] = s[section][
                'namespace']
            tool_d[c_name]['labels']['vent.branch'] = s[section]['branch']
            tool_d[c_name]['labels']['vent.version'] = s[section]['version']
            tool_d[c_name]['labels']['vent.name'] = s[section]['name']
            tool_d[c_name]['labels']['vent.section'] = section
            tool_d[c_name]['labels']['vent.repo'] = s[section]['repo']
            tool_d[c_name]['labels']['vent.type'] = s[section]['type']
            # check for log_config settings in external-services
            externally_configured = False
            vent_config = Template(self.path_dirs.cfg_file)
            for ext_tool in vent_config.section('external-services')[1]:
                if ext_tool[0].lower() == 'syslog':
                    try:
                        log_dict = json.loads(ext_tool[1])
                        # configure if not locally active
                        if ('locally_active' not in log_dict or
                                log_dict['locally_active'] == 'no'):
                            del log_dict['locally_active']
                            log_config = {}
                            log_config['type'] = 'syslog'
                            log_config['config'] = {}
                            ip_address = ''
                            port = ''
                            for option in log_dict:
                                if option == 'ip_address':
                                    ip_address = log_dict[option]
                                elif option == 'port':
                                    port = log_dict['port']
                            syslog_address = 'tcp://' + ip_address + ':' + port
                            syslog_config = {
                                'syslog-address': syslog_address,
                                'syslog-facility': 'daemon',
                                'tag': '{{.Name}}'
                            }
                            log_config['config'].update(syslog_config)
                            externally_configured = True
                    except Exception as e:  # pragma: no cover
                        self.logger.error('external settings for log_config'
                                          " couldn't be stored because: " +
                                          str(e))
                        externally_configured = False
            if not externally_configured:
                # default: local syslog endpoint
                log_config = {
                    'type': 'syslog',
                    'config': {
                        'syslog-address': 'tcp://0.0.0.0:514',
                        'syslog-facility': 'daemon',
                        'tag': '{{.Name}}'
                    }
                }
            if 'groups' in s[section]:
                # add labels for groups
                tool_d[c_name]['labels']['vent.groups'] = s[section]['groups']
                # add restart=always to core containers
                if 'core' in s[section]['groups']:
                    tool_d[c_name]['restart_policy'] = {'Name': 'always'}
                # map network names to environment variables
                if 'network' in s[section]['groups']:
                    vent_config = Template(template=self.path_dirs.cfg_file)
                    nic_mappings = vent_config.section('network-mapping')
                    nics = ''
                    if nic_mappings[0]:
                        for nic in nic_mappings[1]:
                            nics += nic[0] + ':' + nic[1] + ','
                        nics = nics[:-1]
                    if nics:
                        if 'environment' in tool_d[c_name]:
                            tool_d[c_name]['environment'].append('VENT_NICS=' +
                                                                 nics)
                        else:
                            tool_d[c_name]['environment'] = [
                                'VENT_NICS=' + nics
                            ]
                # send logs to syslog
                if ('syslog' not in s[section]['groups'] and
                        'core' in s[section]['groups']):
                    log_config['config']['tag'] = '{{.Name}}'
                    tool_d[c_name]['log_config'] = log_config
                if 'syslog' not in s[section]['groups']:
                    tool_d[c_name]['log_config'] = log_config
                # mount necessary directories
                if 'files' in s[section]['groups']:
                    ulimits = []
                    ulimits.append(
                        docker.types.Ulimit(name='nofile',
                                            soft=1048576,
                                            hard=1048576))
                    tool_d[c_name]['ulimits'] = ulimits
                    # check if running in a docker container
                    if 'VENT_CONTAINERIZED' in environ and environ[
                            'VENT_CONTAINERIZED'] == 'true':
                        if 'volumes_from' in tool_d[c_name]:
                            tool_d[c_name]['volumes_from'].append(
                                environ['HOSTNAME'])
                        else:
                            tool_d[c_name]['volumes_from'] = [
                                environ['HOSTNAME']
                            ]
                    else:
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][
                                self.path_dirs.base_dir[:-1]] = {
                                    'bind': '/vent',
                                    'mode': 'ro'
                                }
                        else:
                            tool_d[c_name]['volumes'] = {
                                self.path_dirs.base_dir[:-1]: {
                                    'bind': '/vent',
                                    'mode': 'ro'
                                }
                            }
                    if files[0]:
                        if 'volumes' in tool_d[c_name]:
                            tool_d[c_name]['volumes'][files[1]] = {
                                'bind': '/files',
                                'mode': 'rw'
                            }
                        else:
                            tool_d[c_name]['volumes'] = {
                                files[1]: {
                                    'bind': '/files',
                                    'mode': 'rw'
                                }
                            }
            else:
                tool_d[c_name]['log_config'] = log_config
            # add label for priority
            if 'settings' in overall_dict:
                try:
                    options_dict = overall_dict['settings']
                    for option in options_dict:
                        if option == 'priority':
                            tool_d[c_name]['labels'][
                                'vent.priority'] = options_dict[option]
                except Exception as e:  # pragma: no cover
                    self.logger.error('unable to store settings options '
                                      'for docker ' + str(e))
            # only start tools that have been built
            if s[section]['built'] != 'yes':
                del tool_d[c_name]
            # store section information for adding info to manifest later
            else:
                tool_d[c_name]['section'] = section
        return status, tool_d

    def _start_priority_containers(self, groups, group_orders, tool_d):
        """ Select containers based on priorities to start """
        vent_cfg = Template(self.path_dirs.cfg_file)
        cfg_groups = vent_cfg.option('groups', 'start_order')
        if cfg_groups[0]:
            cfg_groups = cfg_groups[1].split(',')
        else:
            cfg_groups = []
        all_groups = sorted(set(groups))
        s_conts = []  # containers started successfully
        f_conts = []  # containers that failed to start
        # start tools in order of group defined in vent.cfg
        for group in cfg_groups:
            # remove from all_groups because already checked out
            if group in all_groups:
                all_groups.remove(group)
            if group in group_orders:
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self._start_container(
                            cont_t[1], tool_d, s_conts, f_conts)
        # start tools that haven't been specified in the vent.cfg, if any
        for group in all_groups:
            if group in group_orders:
                for cont_t in sorted(group_orders[group]):
                    if cont_t[1] not in s_conts:
                        s_conts, f_conts = self._start_container(
                            cont_t[1], tool_d, s_conts, f_conts)
        return (s_conts, f_conts)

    def _start_remaining_containers(self, containers_remaining, tool_d):
        """
        Select remaining containers that didn't have priorities to start
        """
        s_containers = []
        f_containers = []
        for container in containers_remaining:
            s_containers, f_containers = self._start_container(
                container, tool_d, s_containers, f_containers)
        return (s_containers, f_containers)

    def _start_container(self, container, tool_d, s_containers, f_containers):
        """ Start container that was passed in and return status """
        # use section to add info to manifest
        section = tool_d[container]['section']
        del tool_d[container]['section']
        manifest = Template(self.manifest)
        try:
            # try to start an existing container first
            c = self.d_client.containers.get(container)
            c.start()
            s_containers.append(container)
            manifest.set_option(section, 'running', 'yes')
            self.logger.info('started ' + str(container) + ' with ID: ' +
                             str(c.short_id))
        except Exception as err:
            # no existing container (or it failed to start): run a new one
            s_containers, f_containers = self._run_container(
                container, tool_d, section, s_containers, f_containers)
        # save changes made to manifest
        manifest.write_config()
        return s_containers, f_containers

    def _run_container(self, container, tool_d, section, s_containers,
                       f_containers):
        """
        Run a new container from tool_d[container]'s options, handling
        optional nvidia-docker-plugin gpu configuration; append the container
        to s_containers or f_containers and return both lists.
        """
        manifest = Template(self.manifest)
        try:
            gpu = 'gpu.enabled'
            failed = False
            if (gpu in tool_d[container]['labels'] and
                    tool_d[container]['labels'][gpu] == 'yes'):
                # ask nvidia-docker-plugin for the volume/device options
                vent_config = Template(template=self.path_dirs.cfg_file)
                port = ''
                host = ''
                result = vent_config.option('nvidia-docker-plugin', 'port')
                if result[0]:
                    port = result[1]
                else:
                    port = '3476'
                result = vent_config.option('nvidia-docker-plugin', 'host')
                if result[0]:
                    host = result[1]
                else:
                    # now just requires ip, ifconfig
                    try:
                        route = check_output(
                            ('ip', 'route')).decode('utf-8').split('\n')
                        default = ''
                        # grab the default network device.
                        for device in route:
                            if 'default' in device:
                                default = device.split()[4]
                                break
                        # grab the IP address for the default device
                        ip_addr = check_output(
                            ('ifconfig', default)).decode('utf-8')
                        ip_addr = ip_addr.split('\n')[1].split()[1]
                        host = ip_addr
                    except Exception as e:  # pragma no cover
                        self.logger.error('failed to grab ip. Ensure that \
ip and ifconfig are installed')
                nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                params = {'vol': 'nvidia_driver'}
                r = requests.get(nd_url, params=params)
                if r.status_code == 200:
                    options = r.text.split()
                    for option in options:
                        if option.startswith('--volume-driver='):
                            tool_d[container]['volume_driver'] = option.split(
                                '=', 1)[1]
                        elif option.startswith('--volume='):
                            vol = option.split('=', 1)[1].split(':')
                            if 'volumes' in tool_d[container]:
                                if isinstance(tool_d[container]['volumes'],
                                              list):
                                    if len(vol) == 2:
                                        c_vol = vol[0] + \
                                            ':' + vol[1] + ':rw'
                                    else:
                                        c_vol = vol[0] + ':' + \
                                            vol[1] + ':' + vol[2]
                                    tool_d[container]['volumes'].append(c_vol)
                                else:  # Dictionary
                                    tool_d[container]['volumes'][vol[0]] = {
                                        'bind': vol[1],
                                        'mode': vol[2]
                                    }
                            else:
                                tool_d[container]['volumes'] = {
                                    vol[0]: {
                                        'bind': vol[1],
                                        'mode': vol[2]
                                    }
                                }
                        elif option.startswith('--device='):
                            dev = option.split('=', 1)[1]
                            if 'devices' in tool_d[container]:
                                tool_d[container]['devices'].append(dev +
                                                                    ':' +
                                                                    dev +
                                                                    ':rwm')
                            else:
                                tool_d[container]['devices'] = [
                                    dev + ':' + dev + ':rwm'
                                ]
                        else:
                            self.logger.error('Unable to parse ' +
                                              'nvidia-docker option: ' +
                                              str(option))
                else:
                    failed = True
                    f_containers.append(container)
                    manifest.set_option(section, 'running', 'failed')
                    self.logger.error('failed to start ' + str(container) +
                                      ' because nvidia-docker-plugin ' +
                                      'failed with: ' + str(r.status_code))
            if not failed:
                try:
                    # NOTE(review): docker-py's ContainerCollection has no
                    # remove(); this always raises and is swallowed, so the
                    # old container may never actually be removed — probably
                    # should be self.d_client.containers.get(container)
                    # .remove(force=True).  TODO confirm.
                    self.d_client.containers.remove(container, force=True)
                    self.logger.info('removed old existing container: ' +
                                     str(container))
                except Exception as e:
                    pass
                cont_id = self.d_client.containers.run(detach=True,
                                                       **tool_d[container])
                s_containers.append(container)
                manifest.set_option(section, 'running', 'yes')
                self.logger.info('started ' + str(container) + ' with ID: ' +
                                 str(cont_id))
        except Exception as e:  # pragma: no cover
            f_containers.append(container)
            manifest.set_option(section, 'running', 'failed')
            self.logger.error('failed to start ' + str(container) +
                              ' because: ' + str(e))
        return s_containers, f_containers

    def stop(self, repo, name):
        """
        Stop the running containers for the manifest sections matching
        repo/name; returns a (bool, result) status tuple.
        """
        args = locals()
        status = (True, None)
        try:
            # !! TODO need to account for plugin containers that have random
            # names, use labels perhaps
            options = ['name', 'namespace', 'built', 'groups', 'path',
                       'image_name', 'branch', 'version']
            s, _ = Template(template=self.manifest).constrain_opts(
                args, options)
            for section in s:
                container_name = s[section]['image_name'].replace(':', '-')
                container_name = container_name.replace('/', '-')
                try:
                    container = self.d_client.containers.get(container_name)
                    container.stop()
                    self.logger.info('Stopped {0}'.format(str(container_name)))
                except Exception as e:  # pragma: no cover
                    self.logger.error('Failed to stop ' +
                                      str(container_name) + ' because: ' +
                                      str(e))
        except Exception as e:  # pragma: no cover
            self.logger.error('Stop failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_commits(self, repo):
        """ Get the commit IDs for all of the branches of a repository """
        commits = []
        try:
            status = self.path_dirs.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. '
                                  'Exiting repo_commits with'
                                  ' status: ' + str(status))
                return status
            status = self.repo_branches(repo)
            if status[0]:
                branches = status[1]
                for branch in branches:
                    try:
                        branch_output = check_output(
                            shlex.split('git rev-list origin/' + branch),
                            stderr=STDOUT,
                            close_fds=True).decode('utf-8')
                        branch_output = branch_output.split('\n')[:-1]
                        branch_output += ['HEAD']
                        commits.append((branch, branch_output))
                    except Exception as e:  # pragma: no cover
                        self.logger.error('repo_commits failed with error: ' +
                                          str(e) + ' on branch: ' +
                                          str(branch))
                        status = (False, e)
                        return status
            else:
                self.logger.error('repo_branches failed. Exiting repo_commits'
                                  ' with status: ' + str(status))
                return status
            chdir(cwd)
            status = (True, commits)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_commits failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_branches(self, repo):
        """ Get the branches of a repository """
        branches = []
        try:
            # switch to directory where repo will be cloned to
            status = self.path_dirs.apply_path(repo)
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. '
                                  'Exiting repo_branches'
                                  ' with status ' + str(status))
                return status
            branch_output = check_output(shlex.split('git branch -a'),
                                         stderr=STDOUT,
                                         close_fds=True)
            branch_output = branch_output.split(b'\n')
            for branch in branch_output:
                br = branch.strip()
                if br.startswith(b'*'):
                    br = br[2:]
                if b'/' in br:
                    branches.append(br.rsplit(b'/', 1)[1].decode('utf-8'))
                elif br:
                    branches.append(br.decode('utf-8'))
            branches = list(set(branches))
            for branch in branches:
                try:
                    check_output(shlex.split('git checkout ' + branch),
                                 stderr=STDOUT,
                                 close_fds=True)
                except Exception as e:  # pragma: no cover
                    self.logger.error('repo_branches failed with error: ' +
                                      str(e) + ' on branch: ' + str(branch))
                    status = (False, e)
                    return status
            chdir(cwd)
            status = (True, branches)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_branches failed with error: ' + str(e))
            status = (False, e)
        return status

    def repo_tools(self, repo, branch, version):
        """ Get available tools for a repository branch at a version """
        try:
            tools = []
            status = self.path_dirs.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.error('apply_path failed. Exiting repo_tools with'
                                  ' status: ' + str(status))
                return status
            # TODO commenting out for now, should use update_repo
            #status = self.p_helper.checkout(branch=branch, version=version)
            status = (True, None)
            if status[0]:
                path, _, _ = self.path_dirs.get_path(repo)
                tools = AvailableTools(path, version=version)
            else:
                self.logger.error('checkout failed. Exiting repo_tools with'
                                  ' status: ' + str(status))
                return status
            chdir(cwd)
            status = (True, tools)
        except Exception as e:  # pragma: no cover
            self.logger.error('repo_tools failed with error: ' + str(e))
            status = (False, e)
        return status
class MenuHelper:
    """ Handle helper functions in the API for the Menu """

    def __init__(self, **kargs):
        # delegate most work to the Action API and its plugin helpers
        self.api_action = Action(**kargs)
        self.plugin = self.api_action.plugin
        self.p_helper = self.api_action.p_helper
        self.logger = Logger(__name__)

    def cores(self, action, branch="master", version='HEAD'):
        """ Supply action (install, build, start, stop, clean) for core tools """
        self.logger.info("Starting: cores")
        status = (False, None)
        try:
            self.logger.info("action provided: " + str(action))
            core = self.tools_status(True, branch=branch, version=version)[1]
            if action in ["install", "build"]:
                tools = []
                core_repo = 'https://github.com/cyberreboot/vent'
                resp = self.p_helper.apply_path(core_repo)
                if resp[0]:
                    cwd = resp[1]
                else:
                    self.logger.info("apply_path failed. Exiting cores"
                                     " with status " + str(resp))
                    return resp
                path = os.path.join(self.plugin.path_dirs.plugins_dir,
                                    'cyberreboot/vent')
                response = self.p_helper.checkout(branch=branch,
                                                  version=version)
                self.logger.info("status of plugin checkout " +
                                 str(response))
                matches = self.p_helper.available_tools(path,
                                                        version=version,
                                                        groups='core')
                for match in matches:
                    name = match[0].rsplit('/')[-1]
                    constraints = {'name': name, 'repo': core_repo}
                    prev_installed, _ = self.p_helper.\
                        constraint_options(constraints, [])
                    if not prev_installed:
                        tools.append((match[0], ''))
                # only add stuff not already installed or repo specification
                if ((tools) or
                        (isinstance(matches, list) and len(matches) == 0)):
                    status = self.plugin.add(core_repo,
                                             tools=tools,
                                             branch=branch,
                                             build=False,
                                             core=True)
                    self.logger.info("status of plugin add: " + str(status))
                else:
                    self.logger.info("no new tools to install")
                    status = (True, "previously installed")
                plugin_c = Template(template=self.plugin.manifest)
                sections = plugin_c.sections()
                # point core tool sections at the prebuilt images on the hub
                for tool in core['normal']:
                    for section in sections[1]:
                        name = plugin_c.option(section, "name")
                        orig_branch = plugin_c.option(section, "branch")
                        namespace = plugin_c.option(section, "namespace")
                        # fixed: loop variable used to shadow the `version`
                        # parameter of this method
                        tool_version = plugin_c.option(section, "version")
                        if (name[1] == tool and
                                orig_branch[1] == branch and
                                namespace[1] == "cyberreboot/vent" and
                                tool_version[1] == "HEAD"):
                            plugin_c.set_option(section,
                                                "image_name",
                                                "cyberreboot/vent-" +
                                                tool.replace('_', '-') + ":" +
                                                branch)
                plugin_c.write_config()
                chdir(cwd)
            if action == "build":
                plugin_c = Template(template=self.plugin.manifest)
                sections = plugin_c.sections()
                try:
                    for tool in core['normal']:
                        for section in sections[1]:
                            tool = tool.replace('_', '-')
                            image_name = plugin_c.option(section,
                                                         "image_name")
                            check_image = "cyberreboot/vent-"
                            check_image += tool + ":" + branch
                            if image_name[1] == check_image:
                                timestamp = str(datetime.utcnow()) + " UTC"
                                try:
                                    # currently can't use docker-py because it
                                    # returns a 404 on pull so no way to valid
                                    # if it worked or didn't
                                    image_id = None
                                    cmd = "docker pull " + check_image
                                    output = check_output(shlex.split(cmd),
                                                          stderr=STDOUT)
                                    # image_name in format of
                                    # (bool, image_name)
                                    name = image_name[1]
                                    d_client = docker.from_env()
                                    image_attrs = d_client.images.get(name)
                                    image_attrs = image_attrs.attrs
                                    image_id = image_attrs['Id'].split(
                                        ':')[1][:12]
                                    if image_id:
                                        plugin_c.set_option(section,
                                                            "built",
                                                            "yes")
                                        plugin_c.set_option(section,
                                                            "image_id",
                                                            image_id)
                                        plugin_c.set_option(section,
                                                            "last_updated",
                                                            timestamp)
                                        status = (True, "Pulled " + tool)
                                        self.logger.info(str(status))
                                    else:
                                        plugin_c.set_option(section,
                                                            "built",
                                                            "failed")
                                        plugin_c.set_option(section,
                                                            "last_updated",
                                                            timestamp)
                                        # NOTE(review): under Python 3
                                        # check_output returns bytes, so
                                        # output.split('\n') would raise here;
                                        # kept as-is for Python 2 era behavior
                                        status = (False,
                                                  "Failed to pull image " +
                                                  str(output.split('\n')[-1]))
                                        self.logger.error(str(status))
                                except Exception as e:  # pragma: no cover
                                    plugin_c.set_option(section,
                                                        "built",
                                                        "failed")
                                    plugin_c.set_option(section,
                                                        "last_updated",
                                                        timestamp)
                                    status = (False,
                                              "Failed to pull image " +
                                              str(e))
                                    self.logger.error(str(status))
                except Exception as e:  # pragma: no cover
                    status = (False, "Failed to pull images " + str(e))
                    self.logger.error(str(status))
                plugin_c.write_config()
            elif action == "start":
                status = self.api_action.prep_start(groups="core",
                                                    branch=branch)
                if status[0]:
                    tool_d = status[1]
                    status = self.api_action.start(tool_d)
            elif action == "stop":
                status = self.api_action.stop(groups="core", branch=branch)
            elif action == "clean":
                status = self.api_action.clean(groups="core", branch=branch)
        except Exception as e:  # pragma: no cover
            self.logger.info("core failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of core: " + str(status[0]))
        self.logger.info("Finished: core")
        return status

    def repo_branches(self, repo):
        """ Get the branches of a repository """
        self.logger.info("Starting: repo_branches")
        self.logger.info("repo given: " + str(repo))
        branches = []
        try:
            # switch to directory where repo will be cloned to
            status = self.p_helper.apply_path(repo)
            if status[0]:
                cwd = status[1]
            else:
                self.logger.info("apply_path failed. "
                                 "Exiting repo_branches"
                                 " with status " + str(status))
                return status
            check_output(shlex.split("git pull --all"),
                         stderr=STDOUT,
                         close_fds=True)
            branch_output = check_output(shlex.split("git branch -a"),
                                         stderr=STDOUT,
                                         close_fds=True)
            # NOTE(review): on Python 3 check_output returns bytes and this
            # str split would raise; kept as-is for Python 2 era behavior
            branch_output = branch_output.split("\n")
            for branch in branch_output:
                b = branch.strip()
                if b.startswith('*'):
                    b = b[2:]
                if "/" in b:
                    branches.append(b.rsplit('/', 1)[1])
                elif b:
                    branches.append(b)
            branches = list(set(branches))
            self.logger.info("branches found: " + str(branches))
            # make sure each branch exists locally
            for branch in branches:
                try:
                    check_output(shlex.split("git checkout " + branch),
                                 stderr=STDOUT,
                                 close_fds=True)
                except Exception as e:  # pragma: no cover
                    self.logger.error("repo_branches failed with error: " +
                                      str(e) + " on branch: " + str(branch))
                    status = (False, e)
                    self.logger.info("Exiting repo_branches with status: " +
                                     str(status))
                    return status
            chdir(cwd)
            status = (True, branches)
        except Exception as e:  # pragma: no cover
            self.logger.error("repo_branches failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of repo_branches: " + str(status))
        self.logger.info("Finished: repo_branches")
        return status

    def repo_commits(self, repo):
        """ Get the commit IDs for all of the branches of a repository """
        self.logger.info("Starting: repo_commits")
        self.logger.info("repo given: " + str(repo))
        commits = []
        try:
            status = self.p_helper.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.info("apply_path failed. "
                                 "Exiting repo_commits with"
                                 " status: " + str(status))
                return status
            status = self.repo_branches(repo)
            if status[0]:
                branches = status[1]
                for branch in branches:
                    try:
                        branch_output = check_output(
                            shlex.split("git rev-list " + branch),
                            stderr=STDOUT,
                            close_fds=True)
                        branch_output = branch_output.split("\n")[:-1]
                        branch_output += ['HEAD']
                        commits.append((branch, branch_output))
                    except Exception as e:  # pragma: no cover
                        self.logger.error("repo_commits failed with error: " +
                                          str(e) + " on branch: " +
                                          str(branch))
                        status = (False, e)
                        self.logger.info("Exiting repo_commits with status: " +
                                         str(status))
                        return status
            else:
                self.logger.info("repo_branches failed. Exiting repo_commits"
                                 " with status: " + str(status))
                return status
            chdir(cwd)
            status = (True, commits)
        except Exception as e:  # pragma: no cover
            self.logger.error("repo_commits failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of repo_commits: " + str(status))
        self.logger.info("Finished: repo_commits")
        return status

    def repo_tools(self, repo, branch, version):
        """ Get available tools for a repository branch at a version """
        self.logger.info("Starting: repo_tools")
        self.logger.info("repo given: " + str(repo))
        self.logger.info("branch given: " + str(branch))
        self.logger.info("version given: " + str(version))
        try:
            tools = []
            status = self.p_helper.apply_path(repo)
            # switch to directory where repo will be cloned to
            if status[0]:
                cwd = status[1]
            else:
                self.logger.info("apply_path failed. Exiting repo_tools with"
                                 " status: " + str(status))
                return status
            status = self.p_helper.checkout(branch=branch, version=version)
            if status[0]:
                path, _, _ = self.p_helper.get_path(repo)
                tools = self.p_helper.available_tools(path, version=version)
            else:
                self.logger.info("checkout failed. "
                                 "Exiting repo_tools with"
                                 " status: " + str(status))
                return status
            chdir(cwd)
            status = (True, tools)
        except Exception as e:  # pragma: no cover
            self.logger.error("repo_tools failed with error: " + str(e))
            status = (False, e)
        self.logger.info("Status of repo_tools: " + str(status))
        self.logger.info("Finished: repo_tools")
        return status

    def tools_status(self, core, branch="master", version="HEAD", **kargs):
        """
        Get tools that are currently installed/built/running and also the
        number of repos that those tools come from; can toggle whether
        looking for core tools or plugin tools
        """
        # !! TODO this might need to store namespaces/branches/versions
        all_tools = {'built': [], 'running': [], 'installed': [],
                     'normal': []}
        core_repo = 'https://github.com/cyberreboot/vent'
        repos = set()
        # NOTE(review): this Tools() instance is immediately overwritten by
        # template.sections() below; kept only for its side effects, if any
        tools = Tools(**kargs)
        # get manifest file
        manifest = os.path.join(self.api_action.plugin.path_dirs.meta_dir,
                                "plugin_manifest.cfg")
        template = Template(template=manifest)
        tools = template.sections()
        # get repos
        if core:
            p_helper = PluginHelper(plugins_dir='.internals/plugins/')
            repos.add(core_repo)
        else:
            p_helper = PluginHelper(plugins_dir='plugins/')
            for tool in tools[1]:
                repo = template.option(tool, 'repo')
                if repo[0] and repo[1] != core_repo:
                    repos.add(repo[1])
        # get normal tools
        for repo in repos:
            status, _ = p_helper.clone(repo)
            if status:
                p_helper.apply_path(repo)
                p_helper.checkout(branch=branch, version=version)
                path, _, _ = p_helper.get_path(repo, core=core)
                matches = None
                if core:
                    matches = p_helper.available_tools(path,
                                                       version=version,
                                                       groups='core')
                else:
                    matches = p_helper.available_tools(path, version=version)
                for match in matches:
                    if core:
                        all_tools['normal'].append(
                            match[0].split('/')[-1].replace('_', '-'))
                    else:
                        all_tools['normal'].append(match[0].split('/')[-1])
        # get tools that have been installed
        for tool in tools[1]:
            repo = template.option(tool, "repo")
            if repo[0] and repo[1] in repos:
                name = template.option(tool, "name")
                if name[0]:
                    all_tools['installed'].append(name[1].replace('_', '-'))
        # get tools that have been built and/or are running
        try:
            d_client = docker.from_env()
            images = d_client.images.list(filters={'label': 'vent'})
            for image in images:
                try:
                    core_check = ("vent.groups" in
                                  image.attrs['Config']['Labels'] and
                                  'core' in
                                  image.attrs['Config']['Labels']['vent.groups'])
                    image_check = None
                    if core:
                        image_check = core_check
                    else:
                        image_check = not core_check
                    if image_check:
                        if ('vent.name' in image.attrs['Config']['Labels'] and
                                'hidden' not in
                                image.attrs['Config']['Labels']['vent.groups']):
                            if core:
                                all_tools['built'].append(
                                    image.attrs['Config']['Labels']
                                    ['vent.name'].replace('_', '-'))
                            else:
                                all_tools['built'].append(
                                    image.attrs['Config']['Labels']
                                    ['vent.name'])
                except Exception as err:  # pragma: no cover
                    self.logger.error("image_check went wrong " + str(err))
            containers = d_client.containers.list(filters={'label': 'vent'})
            for container in containers:
                try:
                    core_check = ("vent.groups" in
                                  container.attrs['Config']['Labels'] and
                                  'core' in
                                  container.attrs['Config']['Labels']['vent.groups'])
                    container_check = None
                    if core:
                        container_check = core_check
                    else:
                        container_check = not core_check
                    if container_check:
                        # fixed: the hidden check previously read the stale
                        # `image` variable left over from the loop above
                        # instead of the current container
                        if ('vent.name' in
                                container.attrs['Config']['Labels'] and
                                'hidden' not in
                                container.attrs['Config']['Labels']['vent.groups']):
                            if core:
                                all_tools['running'].append(
                                    container.attrs['Config']['Labels']
                                    ['vent.name'].replace('_', '-'))
                            else:
                                all_tools['running'].append(
                                    container.attrs['Config']['Labels']
                                    ['vent.name'])
                except Exception as err:  # pragma: no cover
                    self.logger.error("core_check went wrong " + str(err))
        except Exception as e:  # pragma: no cover
            self.logger.error("Something with docker went wrong " + str(e))
        return (len(repos), all_tools)
def file_queue(path, template_path="/vent/", r_host="redis"):
    """
    Processes files that have been added from the rq-worker, starts plugins
    that match the mime type for the new file.

    Args:
        path: job path string; the text before the first '_' is discarded
              and the remainder is treated as the file path
        template_path: directory holding vent.cfg and plugin_manifest.cfg
                       (switched to /root/.vent/ when that directory exists)
        r_host: redis hostname used for the GPU job queue

    Returns:
        tuple of (bool, result) where result is the list of started images
        on success or the set of failed images / error string on failure
    """
    import ConfigParser
    import ast
    import docker
    import json
    import requests
    import os
    import sys
    from redis import Redis
    from rq import Queue
    from subprocess import check_output, Popen, PIPE
    from string import punctuation
    from vent.helpers.logs import Logger

    status = (True, None)
    images = []
    configs = {}
    logger = Logger(__name__)

    # prefer the user's config directory when present
    if os.path.isdir("/root/.vent"):
        template_path = "/root/.vent/"
    try:
        d_client = docker.from_env()

        # get the correct path for binding
        vent_config = ConfigParser.RawConfigParser()
        vent_config.optionxform = str
        vent_config.read(template_path + 'vent.cfg')
        if (vent_config.has_section('main') and
                vent_config.has_option('main', 'files')):
            files = vent_config.get('main', 'files')
        else:
            files = '/'
        # deal with ~
        files = os.path.expanduser(files)

        # punctuation other than path-safe '/', '_', '-' gets escaped below
        chars = set(punctuation)
        chars.discard('/')
        chars.discard('_')
        chars.discard('-')
        file_name = ''
        # escape any funky symbols to allow users FREEDOM of directory name
        # NOTE(review): non-backslash punctuation is prefixed with two
        # backslashes, a backslash itself with one — presumably for a later
        # shell/regex consumer; confirm intent before changing.
        for char in files:
            if char in chars:
                if char == '\\':
                    file_name += '\\' + char
                else:
                    file_name += '\\\\' + char
            else:
                file_name += char
        files = file_name
        # drop the queue-job prefix before the first '_'
        _, path = path.split('_', 1)
        directory = path.rsplit('/', 1)[0]
        path = path.replace('/files', files, 1)
        path_copy = path

        # read in configuration of plugins to get the ones that should run
        # against the path.
        # keep track of images that failed getting configurations for
        failed_images = set()
        config = ConfigParser.RawConfigParser()
        config.optionxform = str
        config.read(template_path + 'plugin_manifest.cfg')
        sections = config.sections()
        name_maps = {}
        orig_path_d = {}
        path_cmd = {}
        labels_d = {}

        # get all name maps (link name -> sanitized image name)
        for section in sections:
            link_name = config.get(section, 'link_name')
            image_name = config.get(section, 'image_name')
            name_maps[link_name] = image_name.replace(':', '-').replace('/',
                                                                        '-')

        for section in sections:
            path = path_copy
            orig_path = ''
            repo = config.get(section, 'repo')
            t_type = config.get(section, 'type')
            labels = {'vent-plugin': '',
                      'file': path,
                      'vent.section': section,
                      'vent.repo': repo,
                      'vent.type': t_type}
            image_name = config.get(section, 'image_name')
            link_name = config.get(section, 'link_name')

            # doesn't matter if it's a repository or registry because both in manifest
            if config.has_option(section, 'groups'):
                if 'replay' in config.get(section, 'groups'):
                    try:
                        # read the vent.cfg file to grab the network-mapping
                        # specified. For replay_pcap
                        n_name = 'network-mapping'
                        n_map = []
                        if vent_config.has_section(n_name):
                            # make sure that the options aren't empty
                            if vent_config.options(n_name):
                                options = vent_config.options(n_name)
                                for option in options:
                                    if vent_config.get(n_name, option):
                                        n_map.append(
                                            vent_config.get(n_name, option))
                        # replay tools get "<interface> <path>" as command
                        orig_path = path
                        path = str(n_map[0]) + " " + path
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))

            # 'service' options become extra container labels
            if config.has_option(section, 'service'):
                try:
                    options_dict = json.loads(config.get(section, 'service'))
                    for option in options_dict:
                        value = options_dict[option]
                        labels[option] = value
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))

            # 'settings' decide whether this tool should run on this file
            if config.has_option(section, 'settings'):
                try:
                    options_dict = json.loads(config.get(section, 'settings'))
                    in_base = directory == '/files'
                    # process base by default
                    process_file = in_base
                    # check if this tool shouldn't process the base by default
                    if 'process_base' in options_dict:
                        if options_dict['process_base'] == 'no':
                            process_file = False
                    # check if this tool should look at subdirs created by
                    # other tools' output
                    if 'process_from_tool' in options_dict and not in_base:
                        for tool in options_dict['process_from_tool'].split(
                                ','):
                            if tool.replace(' ', '-') in directory:
                                process_file = True
                    if 'ext_types' in options_dict and process_file:
                        ext_types = options_dict['ext_types'].split(',')
                        for ext_type in ext_types:
                            if path.endswith(ext_type):
                                images.append(image_name)
                                configs[image_name] = {}
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))

            # only build docker run options for tools selected above
            if image_name in configs:
                if config.has_option(section, 'docker'):
                    try:
                        options_dict = ast.literal_eval(
                            config.get(section, 'docker'))
                        for option in options_dict:
                            try:
                                configs[image_name][option] = ast.literal_eval(
                                    options_dict[option])
                            except Exception as e:  # pragma: no cover
                                # value wasn't a literal; keep raw string
                                configs[image_name][option] = options_dict[
                                    option]
                        if 'links' in configs[image_name]:
                            # remap link names to sanitized image names
                            for link in configs[image_name]['links']:
                                if link in name_maps:
                                    configs[image_name]['links'][
                                        name_maps[link]] = configs[image_name][
                                            'links'].pop(link)
                        # TODO network_mode
                        # TODO volumes_from
                        # TODO external services
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'gpu') and image_name in configs:
                try:
                    options_dict = json.loads(config.get(section, 'gpu'))
                    if 'enabled' in options_dict:
                        enabled = options_dict['enabled']
                        if enabled == 'yes':
                            configs[image_name]['gpu_options'] = options_dict
                            labels['vent.gpu'] = 'yes'
                            if 'dedicated' in options_dict:
                                labels['vent.gpu.dedicated'] = options_dict[
                                    'dedicated']
                            if 'device' in options_dict:
                                labels['vent.gpu.device'] = options_dict[
                                    'device']
                            if 'mem_mb' in options_dict:
                                labels['vent.gpu.mem_mb'] = options_dict[
                                    'mem_mb']
                            # locate the nvidia-docker-plugin REST endpoint
                            port = ''
                            host = ''
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option(
                                        'nvidia-docker-plugin', 'port')):
                                port = vent_config.get('nvidia-docker-plugin',
                                                       'port')
                            else:
                                port = '3476'
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option(
                                        'nvidia-docker-plugin', 'host')):
                                host = vent_config.get('nvidia-docker-plugin',
                                                       'host')
                            else:
                                # grab the default gateway
                                try:
                                    route = Popen(('/sbin/ip', 'route'),
                                                  stdout=PIPE)
                                    h = check_output(
                                        ('awk', '/default/ {print$3}'),
                                        stdin=route.stdout)
                                    route.wait()
                                    host = h.strip()
                                except Exception as e:  # pragma no cover
                                    logger.error("Default gateway "
                                                 "went wrong " + str(e))
                            nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                            params = {'vol': 'nvidia_driver'}
                            try:
                                # ask the plugin which volumes/devices to add
                                r = requests.get(nd_url, params=params)
                                if r.status_code == 200:
                                    options = r.text.split()
                                    for option in options:
                                        if option.startswith(
                                                '--volume-driver='):
                                            configs[image_name][
                                                'volume_driver'] = option.split(
                                                    "=", 1)[1]
                                        elif option.startswith('--volume='):
                                            vol = option.split("=",
                                                               1)[1].split(":")
                                            if 'volumes' in configs[
                                                    image_name]:
                                                # !! TODO handle if volumes is a list
                                                configs[image_name]['volumes'][
                                                    vol[0]] = {
                                                        'bind': vol[1],
                                                        'mode': vol[2]
                                                    }
                                            else:
                                                configs[image_name][
                                                    'volumes'] = {
                                                        vol[0]: {
                                                            'bind': vol[1],
                                                            'mode': vol[2]
                                                        }
                                                    }
                                        elif option.startswith('--device='):
                                            dev = option.split("=", 1)[1]
                                            if 'devices' in configs[
                                                    image_name]:
                                                configs[image_name][
                                                    'devices'].append(dev +
                                                                      ":" +
                                                                      dev +
                                                                      ":rwm")
                                            else:
                                                configs[image_name][
                                                    'devices'] = [
                                                        dev + ":" + dev +
                                                        ":rwm"
                                                    ]
                                        else:
                                            # unable to parse option provided by
                                            # nvidia-docker-plugin
                                            pass
                            except Exception as e:  # pragma: no cover
                                failed_images.add(image_name)
                                status = (False, str(e))
                                print("Failure with nvidia-docker-plugin: " +
                                      str(e))
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
                    print("Unable to process gpu options: " + str(e))
            path_cmd[image_name] = path
            orig_path_d[image_name] = orig_path
            labels_d[image_name] = labels

        # TODO get syslog address rather than hardcode
        # TODO add group label
        # TODO get group and name for syslog tag
        log_config = {
            'type': 'syslog',
            'config': {
                'syslog-address': 'tcp://0.0.0.0:514',
                'syslog-facility': 'daemon',
                'tag': path.rsplit('.', 1)[-1]
            }
        }

        # setup gpu queue
        can_queue_gpu = True
        try:
            q = Queue(connection=Redis(host=r_host), default_timeout=86400)
        except Exception as e:  # pragma: no cover
            can_queue_gpu = False
            print("Unable to connect to redis: " + str(e))

        # start containers
        for image in images:
            if image not in failed_images:
                orig_path = orig_path_d[image]
                labels = labels_d[image]
                if orig_path:
                    # replay_pcap is special so we can't bind it like normal
                    # since the plugin takes in an additional argument
                    dir_path = orig_path.rsplit('/', 1)[0]
                else:
                    dir_path = path.rsplit('/', 1)[0]
                volumes = {dir_path: {'bind': dir_path, 'mode': 'rw'}}
                if 'volumes' in configs[image]:
                    for volume in volumes:
                        configs[image]['volumes'][volume] = volumes[volume]
                else:
                    configs[image]['volumes'] = volumes
                if 'vent.gpu' in labels and labels['vent.gpu'] == 'yes':
                    if can_queue_gpu:
                        # queue up containers requiring a gpu
                        q_str = json.dumps({
                            'image': image,
                            'command': path_cmd[image],
                            'labels': labels,
                            'detach': True,
                            'log_config': log_config,
                            'configs': configs[image]
                        })
                        q.enqueue('watch.gpu_queue', q_str, ttl=2592000)
                    else:
                        failed_images.add(image)
                else:
                    if 'gpu_options' in configs[image]:
                        del configs[image]['gpu_options']
                    d_client.containers.run(image=image,
                                            command=path_cmd[image],
                                            labels=labels,
                                            detach=True,
                                            log_config=log_config,
                                            **configs[image])
        if failed_images:
            status = (False, failed_images)
        else:
            status = (True, images)
    except Exception as e:  # pragma: no cover
        status = (False, str(e))
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        print("Failed to process job: " + str(e))

    print(str(status))
    return status
from os.path import expanduser from os.path import join from subprocess import check_output from subprocess import PIPE from subprocess import Popen from subprocess import STDOUT import docker import pkg_resources import requests from vent.helpers.logs import Logger from vent.helpers.paths import PathDirs from vent.helpers.templates import Template logger = Logger(__name__) def Logs(c_type=None, grep_list=None): """ Generically filter logs stored in log containers """ def get_logs(logs, log_entries): try: for log in logs: if str(container.name) in log_entries: log_entries[str(container.name)].append(log) else: log_entries[str(container.name)] = [log] except Exception as e: # pragma: no cover logger.error('Unable to get logs for ' + str(container.name) + ' because: ' + str(e))
def __init__(self, manifest): self.manifest = manifest self.d_client = docker.from_env() self.logger = Logger(__name__)
class Plugin: """ Handle Plugins """ def __init__(self, **kargs): self.path_dirs = PathDirs(**kargs) self.manifest = os.path.join(self.path_dirs.meta_dir, "plugin_manifest.cfg") self.d_client = docker.from_env() self.logger = Logger(__name__) def apply_path(self, repo): """ Set path to where the repo is and return original path """ self.logger.info("Starting: apply_path") self.logger.info("repo given: " + str(repo)) status = (True, None) try: # rewrite repo for consistency if repo.endswith(".git"): repo = repo.split(".git")[0] # get org and repo name and path repo will be cloned to org, name = repo.split("/")[-2:] self.path = os.path.join(self.path_dirs.plugins_dir, org, name) self.logger.info("cloning to path: " + str(self.path)) # save current path cwd = os.getcwd() # set to new repo path os.chdir(self.path) status = (True, cwd) except Exception as e: self.logger.error("apply_path failed with error: " + str(e)) status = (False, e) self.logger.info("Status of apply_path: " + str(status)) self.logger.info("Finished: apply_path") return status def repo_branches(self, repo): """ Get the branches of a repository """ self.logger.info("Starting: repo_branches") self.logger.info("repo given: " + str(repo)) status = (True, None) branches = [] try: # switch to directory where repo will be cloned to status = self.apply_path(repo) if status[0]: cwd = status[1] else: self.logger.info( "apply_path failed. 
Exiting repo_branches with status " + str(status)) return status junk = subprocess.check_output(shlex.split("git pull --all"), stderr=subprocess.STDOUT, close_fds=True) branch_output = subprocess.check_output( shlex.split("git branch -a"), stderr=subprocess.STDOUT, close_fds=True) branch_output = branch_output.split("\n") for branch in branch_output: b = branch.strip() if b.startswith('*'): b = b[2:] if "/" in b: branches.append(b.rsplit('/', 1)[1]) elif b: branches.append(b) branches = list(set(branches)) self.logger.info("branches found: " + str(branches)) for branch in branches: try: junk = subprocess.check_output( shlex.split("git checkout " + branch), stderr=subprocess.STDOUT, close_fds=True) except Exception as e: # pragma: no cover self.logger.error("repo_branches failed with error: " + str(e) + " on branch: " + str(branch)) status = (False, e) self.logger.info("Exiting repo_branches with status: " + str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: " + str(cwd) + "because: " + str(e)) status = (True, branches) except Exception as e: self.logger.error("repo_branches failed with error: " + str(e)) status = (False, e) self.logger.info("Status of repo_branches: " + str(status)) self.logger.info("Finished: repo_branches") return status def repo_commits(self, repo): """ Get the commit IDs for all of the branches of a repository """ self.logger.info("Starting: repo_commits") self.logger.info("repo given: " + str(repo)) status = (True, None) commits = [] try: status = self.apply_path(repo) # switch to directory where repo will be cloned to if status[0]: cwd = status[1] else: self.logger.info( "apply_path failed. 
Exiting repo_commits with status: " + str(status)) return status status = self.repo_branches(repo) if status[0]: branches = status[1] for branch in branches: try: branch_output = subprocess.check_output( shlex.split("git rev-list " + branch), stderr=subprocess.STDOUT, close_fds=True) branch_output = ['HEAD' ] + branch_output.split("\n")[:-1] commits.append((branch, branch_output)) except Exception as e: # pragma: no cover self.logger.error("repo_commits failed with error: " + str(e) + " on branch: " + str(branch)) status = (False, e) self.logger.info("Exiting repo_commits with status: " + str(status)) return status else: self.logger.info( "repo_branches failed. Exiting repo_commits with status: " + str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: " + str(cwd) + " because: " + str(e)) status = (True, commits) except Exception as e: self.logger.error("repo_commits failed with error: " + str(e)) status = (False, e) self.logger.info("Status of repo_commits: " + str(status)) self.logger.info("Finished: repo_commits") return status def repo_tools(self, repo, branch, version): """ Get available tools for a repository branch at a version """ self.logger.info("Starting: repo_tools") self.logger.info("repo given: " + str(repo)) self.logger.info("branch given: " + str(branch)) self.logger.info("version given: " + str(version)) status = (True, None) try: tools = [] status = self.apply_path(repo) # switch to directory where repo will be cloned to if status[0]: cwd = status[1] else: self.logger.info( "apply_path failed. Exiting repo_tools with status: " + str(status)) return status self.branch = branch self.version = version status = self.checkout() if status[0]: tools = self._available_tools() else: self.logger.info( "checkout failed. 
Exiting repo_tools with status: " + str(status)) return status try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change directory to: " + str(cwd) + " because: " + str(e)) status = (True, tools) except Exception as e: self.logger.error("repo_tools failed with error: " + str(e)) status = (False, e) self.logger.info("Status of repo_tools: " + str(status)) self.logger.info("Finished: repo_tools") return status def clone(self, repo, user=None, pw=None): """ Clone the repository """ self.logger.info("Starting: clone") self.logger.info("repo given: " + str(repo)) self.logger.info("user given: " + str(user)) status = (True, None) try: self.org = None self.name = None self.repo = repo # save current path cwd = os.getcwd() self.logger.info("current working directory: " + str(cwd)) # rewrite repo for consistency if self.repo.endswith(".git"): self.repo = self.repo.split(".git")[0] # get org and repo name and path repo will be cloned to self.org, self.name = self.repo.split("/")[-2:] self.logger.info("org name found: " + str(self.org)) self.logger.info("repo name found: " + str(self.name)) self.path = os.path.join(self.path_dirs.plugins_dir, self.org, self.name) self.logger.info("path to clone to: " + str(self.path)) # check if the directory exists, if so return now status = self.path_dirs.ensure_dir(self.path) if not status[0]: self.logger.info( "ensure_dir failed. 
Exiting clone with status: " + str(status)) return status # set to new repo path os.chdir(self.path) # if path already exists, try git checkout to update if status[0] and status[1] == 'exists': try: response = subprocess.check_output( shlex.split("git -C " + self.path + " rev-parse"), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("path already exists: " + str(self.path)) status = (True, cwd) self.logger.info("Status of clone: " + str(status)) self.logger.info("Finished: clone") return status except Exception as e: # pragma: no cover self.logger.error("unable to checkout: " + str(path) + " because: " + str(e)) status = (False, e) self.logger.info("Exiting clone with status: " + str(status)) return status # ensure cloning still works even if ssl is broken...probably should be improved response = subprocess.check_output( shlex.split("git config --global http.sslVerify false"), stderr=subprocess.STDOUT, close_fds=True) # check if user and pw were supplied, typically for private repos if user and pw: # only https is supported when using user/pw repo = 'https://' + user + ':' + pw + '@' + self.repo.split( "https://")[-1] # clone repo and build tools response = subprocess.check_output( shlex.split("git clone --recursive " + repo + " ."), stderr=subprocess.STDOUT, close_fds=True) status = (True, cwd) except Exception as e: self.logger.error("clone failed with error: " + str(e)) status = (False, e) self.logger.info("Status of clone: " + str(status)) self.logger.info("Finished: clone") return status def add(self, repo, tools=None, overrides=None, version="HEAD", branch="master", build=True, user=None, pw=None, groups=None, version_alias=None, wild=None, remove_old=True, disable_old=True, limit_groups=None): """ Adds a plugin of tool(s) tools is a list of tuples, where the pair is a tool name (path to Dockerfile) and version tools are for explicitly limiting which tools and versions (if version in tuple is '', then defaults to version) overrides is a list of 
tuples, where the pair is a tool name (path to Dockerfile) and a version overrides are for explicitly removing tools and overriding versions of tools (if version in tuple is '', then tool is removed, otherwise that tool is checked out at the specific version in the tuple) if tools and overrides are left as empty lists, then all tools in the repo are pulled down at the version and branch specified or defaulted to version is globally set for all tools, unless overridden in tools or overrides branch is globally set for all tools build is a boolean of whether or not to build the tools now user is the username for a private repo if needed pw is the password to go along with the username for a private repo groups is globally set for all tools version_alias is globally set for all tools and is a mapping from a friendly version tag to the real version commit ID wild lets you specify individual overrides for additional values in the tuple of tools or overrides. wild is a list containing one or more of the following: branch, build, groups, version_alias the order of the items in the wild list will expect values to be tacked on in the same order to the tuple for tools and overrides in additional to the tool name and version remove_old lets you specify whether or not to remove previously found tools that match to ones being added currently (note does not stop currently running instances of the older version) disable_old lets you specify whether or not to disable previously found tools that match to ones being added currently (note does not stop currently running instances of the older version) limit_groups is a list of groups to build tools for that match group names in vent.template of each tool if exists Examples: repo=fe (get all tools from repo 'fe' at version 'HEAD' on branch 'master') repo=foo, version="3d1f", branch="foo" (get all tools from repo 'foo' at verion '3d1f' on branch 'foo') repo=foo, tools=[('bar', ''), ('baz', '1d32')] (get only 'bar' from repo 'foo' at 
version 'HEAD' on branch 'master' and 'baz' from repo 'foo' at version '1d32' on branch 'master', ignore all other tools in repo 'foo') repo=foo overrides=[('baz/bar', ''), ('.', '1c4e')], version='4fad' (get all tools from repo 'foo' at verion '4fad' on branch 'master' except 'baz/bar' and for tool '.' get version '1c4e') repo=foo tools=[('bar', '1a2d')], overrides=[('baz', 'f2a1')] (not a particularly useful example, but get 'bar' from 'foo' at version '1a2d' and get 'baz' from 'foo' at version 'f2a1' on branch 'master', ignore all other tools) """ # initialize and store class objects self.tools = tools self.overrides = overrides self.version = version self.branch = branch self.build = build self.groups = groups # TODO these need to be implemented self.version_alias = version_alias self.wild = wild self.remove_old = remove_old self.disable_old = disable_old self.limit_groups = limit_groups status = (True, None) status_code, cwd = self.clone(repo, user=user, pw=pw) status = self._build_tools(status_code) # set back to original path try: os.chdir(cwd) except Exception as e: # pragma: no cover pass return status @ErrorHandler def builder(self, template, match_path, image_name, section, build=None, branch=None, version=None): """ Build tools """ self.logger.info("Starting: builder") self.logger.info("install path: " + str(match_path)) self.logger.info("image name: " + str(image_name)) self.logger.info("build: " + str(build)) self.logger.info("branch: " + str(branch)) self.logger.info("version: " + str(version)) if build: self.build = build elif not hasattr(self, 'build'): self.build = True if branch: self.branch = branch elif not hasattr(self, 'branch'): self.branch = 'master' if version: self.version = version elif not hasattr(self, 'version'): self.version = 'HEAD' cwd = os.getcwd() self.logger.info("current working directory: " + str(cwd)) try: os.chdir(match_path) except Exception as e: self.logger.error("unable to change to directory: " + str(match_path) + " 
because: " + str(e)) return None template = self._build_image(template, match_path, image_name, section) try: os.chdir(cwd) except Exception as e: # pragma: no cover self.logger.error("unable to change to directory: " + str(cwd) + " because: " + str(e)) self.logger.info("template of builder: " + str(template)) self.logger.info("Finished: builder") return template def _build_tools(self, status): """ Create list of tools, paths, and versions to be built and sends them to build_manifest """ response = (True, None) # !! TODO implement features: wild, remove_old, disable_old, limit_groups # check result of clone, ensure successful or that it already exists if status: response = self.checkout() if response[0]: matches = [] if self.tools is None and self.overrides is None: # get all tools matches = self._available_tools() elif self.tools is None: # there's only something in overrides # grab all the tools then apply overrides matches = self._available_tools() # !! TODO apply overrides to matches elif self.overrides is None: # there's only something in tools # only grab the tools specified matches = self.get_tool_matches() else: # both tools and overrides were specified # grab only the tools specified, with the overrides applied orig_matches = self.get_tool_matches() matches = orig_matches for override in self.overrides: override_t = None if override[0] == '.': override_t = ('', override[1]) else: override_t = override for match in orig_matches: if override_t[0] == match[0]: matches.remove(match) matches.append(override_t) if len(matches) > 0: self._build_manifest(matches) else: response = (False, status) return response def get_tool_matches(self): """ Get the tools paths and versions that were specified by self.tools and self.version """ matches = [] if not hasattr(self, 'tools'): self.tools = [] if not hasattr(self, 'version'): self.version = 'HEAD' for tool in self.tools: match_version = self.version if tool[1] != '': match_version = tool[1] match = '' if 
tool[0].endswith('/'): match = tool[0][:-1] elif tool[0] != '.': match = tool[0] if not match.startswith('/') and match != '': match = '/' + match matches.append((match, match_version)) return matches def _build_manifest(self, matches): """ Builds and writes the manifest for the tools being added """ # !! TODO check for pre-existing that conflict with request and disable and/or remove image for match in matches: template = Template(template=self.manifest) # !! TODO check for special settings here first for the specific match self.version = match[1] response = self.checkout() if response[0]: section = self.org + ":" + self.name + ":" + match[ 0] + ":" + self.branch + ":" + self.version match_path = self.path + match[0] image_name = self.org + "-" + self.name + "-" if match[0] != '': # if tool is in a subdir, add that to the name of the image image_name += '-'.join(match[0].split('/')[1:]) + "-" image_name += self.branch + ":" + self.version # check if the section already exists exists, options = template.section(section) previous_commit = None previous_commits = None head = False if exists: for option in options: # TODO check if tool name but a different version exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # !! TODO # check if section should be removed from config - i.e. 
all tools, # but new commit removed one that was in a previous commit # set template section and options for tool at version and branch template.add_section(section) template.set_option(section, "name", match[0].split('/')[-1]) template.set_option(section, "namespace", self.org + '/' + self.name) template.set_option(section, "path", match_path) template.set_option(section, "repo", self.repo) template.set_option(section, "enabled", "yes") template.set_option(section, "branch", self.branch) template.set_option(section, "version", self.version) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") template.set_option(section, "image_name", image_name) vent_template = Template( template=os.path.join(match_path, 'vent.template')) vent_status, response = vent_template.option("info", "name") if vent_status: template.set_option(section, "link_name", response) else: template.set_option(section, "link_name", match[0].split('/')[-1]) commit_id = None if self.version == 'HEAD': os.chdir(match_path) commit_id = subprocess.check_output( shlex.split("git rev-parse --short HEAD"), stderr=subprocess.STDOUT, close_fds=True).strip() template.set_option(section, "commit_id", commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if previous_commits and previous_commit not in previous_commits: previous_commits = previous_commit + ',' + previous_commits elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, "previous_versions", previous_commits) if self.version_alias: template.set_option(section, "version_alias", self.version_alias) if self.groups: template.set_option(section, "groups", self.groups) else: vent_template = os.path.join(match_path, 'vent.template') if os.path.exists(vent_template): v_template = Template(template=vent_template) 
groups = v_template.option("info", "groups") if groups[0]: template.set_option(section, "groups", groups[1]) template = self._build_image(template, match_path, image_name, section) # write out configuration to the manifest file template.write_config() # reset to repo directory os.chdir(self.path) return def _build_image(self, template, match_path, image_name, section): """ Build docker images and store results in template """ # !! TODO return status of whether it built successfully or not if self.build: try: os.chdir(match_path) # currently can't use docker-py because it doesn't support labels on images yet name = template.option(section, "name") groups = template.option(section, "groups") if groups[1] == "" or not groups[0]: groups = (True, "none") if not name[0]: name = (True, image_name) # pull if '/' in image_name, fallback to build pull = False if '/' in image_name: try: self.logger.info("Trying to pull " + image_name) output = subprocess.check_output( shlex.split("docker pull " + image_name), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("Pulling " + name[1] + "\n" + str(output)) for line in output.split('\n'): if line.startswith("Digest: sha256:"): image_id = line.split( "Digest: sha256:")[1][:12] if image_id: template.set_option(section, "built", "yes") template.set_option(section, "image_id", image_id) template.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (True, "Pulled " + image_name) self.logger.info(str(status)) else: template.set_option(section, "built", "failed") template.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image " + str(output.split('\n')[-1])) self.logger.warning(str(status)) pull = True except Exception as e: # pragma: no cover self.logger.warning( "Failed to pull image, going to build instead: " + str(e)) if not pull: output = subprocess.check_output(shlex.split( "docker build --label vent --label vent.name=" + 
name[1] + " --label vent.groups=" + groups[1] + " -t " + image_name + " ."), stderr=subprocess.STDOUT, close_fds=True) self.logger.info("Building " + name[1] + "\n" + str(output)) image_id = "" for line in output.split("\n"): if line.startswith("Successfully built "): image_id = line.split( "Successfully built ")[1].strip() template.set_option(section, "built", "yes") template.set_option(section, "image_id", image_id) template.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") except Exception as e: # pragma: no cover self.logger.error("unable to build image: " + str(image_name) + " because: " + str(e)) template.set_option(section, "built", "failed") template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") else: template.set_option(section, "built", "no") template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") return template def _available_tools(self, groups=None): """ Return list of possible tools in repo for the given version and branch """ matches = [] if not hasattr(self, 'path'): return matches if groups: groups = groups.split(",") for root, dirnames, filenames in os.walk(self.path): for filename in fnmatch.filter(filenames, 'Dockerfile'): # !! TODO deal with wild/etc.? 
if groups: try: template = Template( template=os.path.join(root, 'vent.template')) for group in groups: template_groups = template.option("info", "groups") if template_groups[0] and group in template_groups[ 1]: matches.append( (root.split(self.path)[1], self.version)) except Exception as e: # pragma: no cover pass else: matches.append((root.split(self.path)[1], self.version)) return matches def checkout(self): """ Checkout a specific version and branch of a repo """ if not hasattr(self, 'branch'): self.branch = 'master' if not hasattr(self, 'version'): self.version = 'HEAD' response = (True, None) try: status = subprocess.check_output(shlex.split("git checkout " + self.branch), stderr=subprocess.STDOUT, close_fds=True) status = subprocess.check_output(shlex.split("git pull"), stderr=subprocess.STDOUT, close_fds=True) status = subprocess.check_output(shlex.split("git reset --hard " + self.version), stderr=subprocess.STDOUT, close_fds=True) response = (True, status) except Exception as e: # pragma: no cover response = (False, os.getcwd() + str(e)) return response @staticmethod def add_image(image, tag="latest"): """ Add an image from a registry/hub rather than building from a repository """ # !! 
TODO return def constraint_options(self, constraint_dict, options): """ Return result of constraints and options against a template """ constraints = {} template = Template(template=self.manifest) for constraint in constraint_dict: if constraint != 'self': if constraint_dict[constraint] or constraint_dict[ constraint] == '': constraints[constraint] = constraint_dict[constraint] results = template.constrained_sections(constraints=constraints, options=options) return results, template def tools(self): """ Return list of tuples of all tools """ tools = [] template = Template(template=self.manifest) exists, sections = template.sections() if exists: for section in sections: options = { 'section': section, 'enabled': None, 'built': None, 'version': None, 'repo': None, 'branch': None, 'name': None, 'groups': None, 'image_name': None } for option in options.keys(): exists, value = template.option(section, option) if exists: options[option] = value tools.append(options) return tools def remove(self, name=None, repo=None, namespace=None, branch="master", groups=None, enabled="yes", version="HEAD", built="yes"): """ Remove tool (name) or repository, repository is the url. If no arguments are specified, all tools will be removed for the defaults. 
""" # initialize args = locals() status = (True, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: response, image_name = template.option(result, 'image_name') # check for container and remove container_name = image_name.replace(':', '-').replace('/', '-') try: container = self.d_client.containers.get(container_name) response = container.remove(v=True, force=True) self.logger.info(response) self.logger.info("Removing plugin container: " + container_name) except Exception as e: # pragma: no cover self.logger.warn("Unable to remove the plugin container: " + container_name + " because: " + str(e)) # check for image and remove try: response = self.d_client.images.remove(image_name) self.logger.info(response) self.logger.info("Removing plugin image: " + image_name) except Exception as e: # pragma: no cover self.logger.warn("Unable to remove the plugin image: " + image_name + " because: " + str(e)) # remove tool from the manifest status = template.del_section(result) self.logger.info("Removing plugin tool: " + result) # TODO if all tools from a repo have been removed, remove the repo template.write_config() return status def update(self, name=None, repo=None, namespace=None, branch=None, groups=None): """ Update tool (name) or repository, repository is the url. 
If no arguments are specified, all tools will be updated """ # initialize args = locals() status = (False, None) options = ['branch', 'groups', 'image_name'] # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, options) for result in results: # check for container and remove try: container_name = results['image_name'].replace(':', '-') \ .replace('/', '-') container = self.d_client.containers.get(container_name) response = container.remove(v=True, force=True) except Exception as e: # pragma: no cover pass # TODO git pull # TODO build # TODO docker pull # TODO update tool in the manifest self.logger.info("Updating plugin tool: " + result) template.write_config() return status # !! TODO name or group ? def versions(self, name, namespace=None, branch="master"): """ Return available versions of a tool """ # initialize args = locals() versions = [] options = ['version', 'previous_versions'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: version_list = [results[result]['version']] if 'previous_versions' in results[result]: version_list = version_list + ( results[result]['previous_versions']).split(',') versions.append((result, version_list)) return versions # !! TODO name or group ? def current_version(self, name, namespace=None, branch="master"): """ Return current version for a given tool """ # initialize args = locals() versions = [] options = ['version'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: versions.append((result, results[result]['version'])) return versions # !! TODO name or group ? 
def state(self, name, namespace=None, branch="master"): """ Return state of a tool, disabled/enabled for each version """ # initialize args = locals() states = [] options = ['enabled'] # get resulting dictionary of sections with options that match constraints results, _ = self.constraint_options(args, options) for result in results: if results[result]['enabled'] == 'yes': states.append((result, 'enabled')) else: states.append((result, 'disabled')) return states # !! TODO name or group ? def enable(self, name, namespace=None, branch="master", version="HEAD"): """ Enable tool at a specific version, default to head """ # initialize args = locals() status = (False, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: status = template.set_option(result, 'enabled', 'yes') template.write_config() return status # !! TODO name or group ? def disable(self, name, namespace=None, branch="master", version="HEAD"): """ Disable tool at a specific version, default to head """ # initialize args = locals() status = (False, None) # get resulting dictionary of sections with options that match constraints results, template = self.constraint_options(args, []) for result in results: status = template.set_option(result, 'enabled', 'no') template.write_config() return status
class Plugin:
    """ Handle Plugins """

    def __init__(self, **kargs):
        self.path_dirs = PathDirs(**kargs)
        self.manifest = join(self.path_dirs.meta_dir, "plugin_manifest.cfg")
        self.p_helper = PluginHelper(**kargs)
        self.d_client = docker.from_env()
        self.logger = Logger(__name__)

    def add(self, repo, tools=None, overrides=None, version="HEAD",
            branch="master", build=True, user=None, pw=None, groups=None,
            version_alias=None, wild=None, remove_old=True, disable_old=True,
            limit_groups=None, core=False):
        """
        Adds a plugin of tool(s)
        tools is a list of tuples, where the pair is a tool name (path to
        Dockerfile) and version
          tools are for explicitly limiting which tools and versions
          (if version in tuple is '', then defaults to version)
        overrides is a list of tuples, where the pair is a tool name
        (path to Dockerfile) and a version
          overrides are for explicitly removing tools and overriding
          versions of tools
          (if version in tuple is '', then tool is removed, otherwise
          that tool is checked out at the specific version in the tuple)
        if tools and overrides are left as empty lists, then all tools in
        the repo are pulled down at the version and branch specified or
        defaulted to
        version is globally set for all tools, unless overridden in tools
        or overrides
        branch is globally set for all tools
        build is a boolean of whether or not to build the tools now
        user is the username for a private repo if needed
        pw is the password to go along with the username for a private repo
        groups is globally set for all tools
        version_alias is globally set for all tools and is a mapping from
        a friendly version tag to the real version commit ID
        wild lets you specify individual overrides for additional values
        in the tuple of tools or overrides.  wild is a list containing one
        or more of the following: branch, build, groups, version_alias
          the order of the items in the wild list will expect values to be
          tacked on in the same order to the tuple for tools and overrides
          in additional to the tool name and version
        remove_old lets you specify whether or not to remove previously
        found tools that match to ones being added currently (note does
        not stop currently running instances of the older version)
        disable_old lets you specify whether or not to disable previously
        found tools that match to ones being added currently (note does
        not stop currently running instances of the older version)
        limit_groups is a list of groups to build tools for that match
        group names in vent.template of each tool if exists

        Examples:
          - repo=fe:
            (get all tools from repo 'fe' at version 'HEAD' on branch
            'master')
          - repo=foo, version="3d1f", branch="foo":
            (get all tools from repo 'foo' at version '3d1f' on branch
            'foo')
          - repo=foo, tools=[('bar', ''), ('baz', '1d32')]:
            (get only 'bar' from repo 'foo' at version 'HEAD' on branch
            'master' and 'baz' from repo 'foo' at version '1d32' on
            branch 'master', ignore all other tools in repo 'foo')
          - repo=foo overrides=[('baz/bar', ''), ('.', '1c4e')],
            version='4fad':
            (get all tools from repo 'foo' at version '4fad' on branch
            'master' except 'baz/bar' and for tool '.' get version '1c4e')
          - repo=foo tools=[('bar', '1a2d')], overrides=[('baz', 'f2a1')]:
            (not a particularly useful example, but get 'bar' from 'foo'
            at version '1a2d' and get 'baz' from 'foo' at version 'f2a1'
            on branch 'master', ignore all other tools)
        """
        # initialize and store class objects
        self.repo = repo.lower()
        self.tools = tools
        if (isinstance(self.tools, list) and len(self.tools) == 0):
            self.tools = None
        self.overrides = overrides
        self.version = version
        self.branch = branch
        self.build = build
        self.groups = groups
        self.core = core
        self.path, self.org, self.name = self.p_helper.get_path(repo,
                                                                core=core)

        # TODO these need to be implemented
        self.version_alias = version_alias
        self.wild = wild
        self.remove_old = remove_old
        self.disable_old = disable_old
        self.limit_groups = limit_groups

        status = (True, None)
        status_code, _ = self.p_helper.clone(self.repo, user=user, pw=pw)
        self.p_helper.apply_path(self.repo)
        status = self._build_tools(status_code)
        return status

    @ErrorHandler
    def add_image(self, image, link_name, tag=None, registry=None,
                  groups=None):
        """
        Add an image with a tag from a Docker registry. Defaults to the
        Docker Hub if not specified. Use a Template object to write an
        image's information to `plugin_manifest.cfg'

        Args:
            image(type): docker image
            link_name(type): fill me

        Kwargs:
            tag(type):
            registry(type):
            groups(type): Group that the docker image belongs to.

        Returns:
            tuple(bool,str): if the function completed successfully,
            (True, name of image). If the function failed,
            (False, message about failure)
        """
        status = (True, None)
        try:
            pull_name = image
            org = ''
            name = image
            if '/' in image:
                org, name = image.split('/')
            else:
                org = "official"
            if not tag:
                tag = "latest"
            if not registry:
                registry = "docker.io"
            full_image = registry + "/" + image + ":" + tag
            image = self.d_client.images.pull(full_image)
            section = ':'.join([registry, org, name, '', tag])
            namespace = org + '/' + name

            # set template section and options for tool at version and branch
            template = Template(template=self.manifest)
            template.add_section(section)
            template.set_option(section, "name", name)
            template.set_option(section, "pull_name", pull_name)
            template.set_option(section, "namespace", namespace)
            template.set_option(section, "path", "")
            template.set_option(section, "repo", registry + '/' + org)
            template.set_option(section, "enabled", "yes")
            template.set_option(section, "branch", "")
            template.set_option(section, "version", tag)
            template.set_option(section, "last_updated",
                                str(datetime.utcnow()) + " UTC")
            template.set_option(section, "image_name",
                                image.attrs['RepoTags'][0])
            template.set_option(section, "type", "registry")
            template.set_option(section, "link_name", link_name)
            template.set_option(section, "commit_id", "")
            template.set_option(section, "built", "yes")
            template.set_option(section, "image_id",
                                image.attrs['Id'].split(':')[1][:12])
            template.set_option(section, "groups", groups)

            # write out configuration to the manifest file
            template.write_config()
            status = (True, "Successfully added " + full_image)
        except Exception as e:  # pragma: no cover
            self.logger.error("Couldn't add image because " + str(e))
            status = (False, str(e))
        return status

    @ErrorHandler
    def builder(self, template, match_path, image_name, section, build=None,
                branch=None, version=None):
        """ Build tools """
        self.logger.info("Starting: builder")
        self.logger.info("install path: " + str(match_path))
        self.logger.info("image name: " + str(image_name))
        self.logger.info("build: " + str(build))
        self.logger.info("branch: " + str(branch))
        self.logger.info("version: " + str(version))
        # explicit arguments override instance state; fall back to defaults
        if build:
            self.build = build
        elif not hasattr(self, 'build'):
            self.build = True
        if branch:
            self.branch = branch
        elif not hasattr(self, 'branch'):
            self.branch = 'master'
        if version:
            self.version = version
        elif not hasattr(self, 'version'):
            self.version = 'HEAD'
        cwd = getcwd()
        self.logger.info("current working directory: " + str(cwd))
        try:
            chdir(match_path)
        except Exception as e:  # pragma: no cover
            self.logger.error("unable to change to directory: " +
                              str(match_path) + " because: " + str(e))
            return None
        template = self._build_image(template, match_path, image_name,
                                     section)
        chdir(cwd)

        # get untagged images
        untagged = None
        try:
            untagged = self.d_client.images.list(filters={"label": "vent",
                                                          "dangling": "true"})
        except Exception as e:  # pragma: no cover
            self.logger.error("unabled to get images to remove: " + str(e))

        # remove untagged images
        if untagged:
            deleted_images = ""
            for image in untagged:
                deleted_images = '\n'.join([deleted_images, image.id])
                try:
                    self.d_client.images.remove(image.id, force=True)
                except Exception as e:  # pragma: no cover
                    self.logger.warning("unable to remove image: " +
                                        image.id + " because: " + str(e))
            self.logger.info("removed dangling images:" + deleted_images)

        self.logger.info("template of builder: " + str(template))
        self.logger.info("Finished: builder")
        return template

    def _build_tools(self, status):
        """
        Create list of tools, paths, and versions to be built and sends
        them to build_manifest

        Args:
            status (tuple(bool, str)):

        Returns:
            response (tuple(bool, str)): If True, then the function
            performed as expected and the str is a string
        """
        response = (True, None)
        # TODO implement features: wild, remove_old, disable_old,
        # limit_groups
        # check result of clone, ensure successful or that it already exists
        if status:
            response = self.p_helper.checkout(branch=self.branch,
                                              version=self.version)
            if response[0]:
                search_groups = None
                if self.core:
                    search_groups = 'core'
                matches = []
                if self.tools is None and self.overrides is None:
                    # get all tools
                    matches = self.p_helper.available_tools(
                        self.path, version=self.version,
                        groups=search_groups)
                elif self.tools is None:
                    # there's only something in overrides
                    # grab all the tools then apply overrides
                    matches = self.p_helper.available_tools(
                        self.path, version=self.version,
                        groups=search_groups)
                    # !! TODO apply overrides to matches
                elif self.overrides is None:
                    # there's only something in tools
                    # only grab the tools specified
                    matches = PluginHelper.tool_matches(
                        tools=self.tools, version=self.version)
                else:
                    # both tools and overrides were specified
                    # grab only the tools specified, with the overrides
                    # applied
                    o_matches = PluginHelper.tool_matches(
                        tools=self.tools, version=self.version)
                    matches = o_matches
                    for override in self.overrides:
                        override_t = None
                        if override[0] == '.':
                            override_t = ('', override[1])
                        else:
                            override_t = override
                        for match in o_matches:
                            if override_t[0] == match[0]:
                                matches.remove(match)
                        matches.append(override_t)
                if len(matches) > 0:
                    self._build_manifest(matches)
        else:
            response = (False, status)
        return response

    def _build_manifest(self, matches):
        """ Builds and writes the manifest for the tools being added """
        # !! TODO check for pre-existing that conflict with request and
        #         disable and/or remove image
        for match in matches:
            # keep track of whether or not to write an additional manifest
            # entry for multiple instances, and how many additional entries
            # to write
            addtl_entries = 0
            # remove the .git for adding repo info to manifest
            if self.repo.endswith('.git'):
                self.repo = self.repo[:-4]
            # remove @ in match for template setting purposes
            if match[0].find('@') >= 0:
                true_name = match[0].split('@')[1]
            else:
                true_name = match[0]
            template = Template(template=self.manifest)
            # TODO check for special settings here first for the specific
            # match
            self.version = match[1]
            response = self.p_helper.checkout(branch=self.branch,
                                              version=self.version)
            if response[0]:
                section = self.org + ":" + self.name + ":" + true_name + ":"
                section += self.branch + ":" + self.version
                # need to get rid of temp identifiers for tools in same repo
                match_path = self.path + match[0].split('@')[0]
                if not self.core:
                    image_name = self.org + "-" + self.name + "-"
                    if match[0] != '':
                        # if tool is in a subdir, add that to the name of
                        # the image
                        image_name += '-'.join(match[0].split('/')[1:]) + "-"
                    image_name += self.branch + ":" + self.version
                else:
                    image_name = ('cyberreboot/vent-' +
                                  match[0].split('/')[-1] + ':' +
                                  self.branch)
                image_name = image_name.replace('_', '-')

                # check if the section already exists
                exists, options = template.section(section)
                previous_commit = None
                previous_commits = None
                head = False
                if exists:
                    for option in options:
                        # TODO check if tool name but a different version
                        #      exists - then disable/remove if set
                        if option[0] == 'version' and option[1] == 'HEAD':
                            head = True
                        if option[0] == 'built' and option[1] == 'yes':
                            # !! TODO remove pre-existing image
                            pass
                        if option[0] == 'commit_id':
                            previous_commit = option[1]
                        if option[0] == 'previous_versions':
                            previous_commits = option[1]

                # check if tool comes from multi directory
                multi_tool = "no"
                if match[0].find('@') >= 0:
                    multi_tool = "yes"

                # !! TODO
                # check if section should be removed from config i.e. all
                # tools but new commit removed one that was in a previous
                # commit
                image_name = image_name.lower()
                if image_name.endswith(":head"):
                    image_name = image_name.split(":head")[0] + ":HEAD"

                # set template section & options for tool at version and
                # branch
                template.add_section(section)
                template.set_option(section, "name",
                                    true_name.split('/')[-1])
                template.set_option(section, "namespace",
                                    self.org + '/' + self.name)
                template.set_option(section, "path", match_path)
                template.set_option(section, "repo", self.repo)
                template.set_option(section, "enabled", "yes")
                template.set_option(section, "multi_tool", multi_tool)
                template.set_option(section, "branch", self.branch)
                template.set_option(section, "version", self.version)
                template.set_option(section, "last_updated",
                                    str(datetime.utcnow()) + " UTC")
                template.set_option(section, "image_name",
                                    image_name.replace('@', '-'))
                template.set_option(section, "type", "repository")

                # save settings in vent.template to plugin_manifest
                # watch for multiple tools in same directory
                # just wanted to store match path with @ for path for use
                # in other actions
                tool_template = 'vent.template'
                if match[0].find('@') >= 0:
                    tool_template = match[0].split('@')[1] + '.template'
                vent_template_path = join(match_path, tool_template)
                if os.path.exists(vent_template_path):
                    with open(vent_template_path) as f:
                        vent_template_val = f.read()
                else:
                    vent_template_val = ''
                settings_dict = ParsedSections(vent_template_val)
                for setting in settings_dict:
                    template.set_option(section, setting,
                                        json.dumps(settings_dict[setting]))
                # TODO do we need this if we save as a dictionary?
                vent_template = Template(vent_template_path)
                vent_status, response = vent_template.option("info", "name")
                if vent_status:
                    template.set_option(section, "link_name", response)
                else:
                    template.set_option(section, "link_name",
                                        true_name.split('/')[-1])
                commit_id = None
                if self.version == 'HEAD':
                    # remove @ in multi-tools
                    chdir(match_path)
                    cmd = "git rev-parse --short HEAD"
                    commit_id = check_output(shlex.split(cmd),
                                             stderr=STDOUT,
                                             close_fds=True).strip()
                    template.set_option(section, "commit_id", commit_id)
                if head:
                    # no need to store previous commits if not HEAD, since
                    # the version will always be the same commit ID
                    if previous_commit and previous_commit != commit_id:
                        if (previous_commits and
                                previous_commit not in previous_commits):
                            previous_commits = (previous_commit + ',' +
                                                previous_commits)
                        elif not previous_commits:
                            previous_commits = previous_commit
                    if previous_commits and previous_commits != commit_id:
                        template.set_option(section, "previous_versions",
                                            previous_commits)

                if self.version_alias:
                    template.set_option(section, "version_alias",
                                        self.version_alias)
                if self.groups:
                    template.set_option(section, "groups", self.groups)
                else:
                    groups = vent_template.option("info", "groups")
                    if groups[0]:
                        template.set_option(section, "groups", groups[1])
                    # set groups to empty string if no groups defined for
                    # tool
                    else:
                        template.set_option(section, "groups", '')
                template = self._build_image(template, match_path,
                                             image_name, section)
                # write additional entries for multiple instances
                # NOTE(review): addtl_entries is never incremented in this
                # method, so this branch is currently dead — confirm intent
                if addtl_entries > 0:
                    # add 2 for naming conventions
                    for i in range(2, addtl_entries + 2):
                        addtl_section = section.rsplit(':', 2)
                        addtl_section[0] += str(i)
                        addtl_section = ':'.join(addtl_section)
                        template.add_section(addtl_section)
                        orig_vals = template.section(section)[1]
                        for val in orig_vals:
                            template.set_option(addtl_section, val[0],
                                                val[1])
                        template.set_option(addtl_section, "name",
                                            true_name.split('/')[-1] +
                                            str(i))
            # write out configuration to the manifest file
            template.write_config()
        # reset to repo directory
        chdir(self.path)
        return

    def _build_image(self, template, match_path, image_name, section,
                     build_local=False):
        """ Build docker images and store results in template """

        def set_instances(template, section, built, image_id=None):
            """ Set build information for multiple instances """
            self.logger.info("entering set_instances")
            i = 2
            while True:
                addtl_section = section.rsplit(':', 2)
                addtl_section[0] += str(i)
                addtl_section = ':'.join(addtl_section)
                self.logger.info(addtl_section)
                if template.section(addtl_section)[0]:
                    template.set_option(addtl_section, "built", built)
                    if image_id:
                        template.set_option(addtl_section, "image_id",
                                            image_id)
                    template.set_option(addtl_section, "last_updated",
                                        Timestamp())
                else:
                    break
                i += 1

        # determine whether a tool should be considered a multi instance
        try:
            settings_dict = json.loads(template.option(section,
                                                       'settings')[1])
            if int(settings_dict['instances']) > 1:
                multi_instance = True
            else:
                multi_instance = False
        except Exception:
            multi_instance = False

        # !! TODO return status of whether it built successfully or not
        if self.build:
            cwd = getcwd()
            chdir(match_path)
            try:
                # currently can't use docker-py because it doesn't support
                # labels on images yet
                name = template.option(section, "name")
                groups = template.option(section, "groups")
                repo = template.option(section, "repo")
                t_type = template.option(section, "type")
                if groups[1] == "" or not groups[0]:
                    groups = (True, "none")
                if not name[0]:
                    name = (True, image_name)
                # pull if '/' in image_name, fallback to build
                pull = False
                if '/' in image_name and not build_local:
                    try:
                        self.logger.info("Trying to pull " + image_name)
                        output = check_output(shlex.split("docker pull " +
                                                          image_name),
                                              stderr=STDOUT,
                                              close_fds=True)
                        self.logger.info("Pulling " + name[1] + "\n" +
                                         str(output))
                        i_attrs = self.d_client.images.get(image_name).attrs
                        image_id = i_attrs['Id'].split(':')[1][:12]
                        if image_id:
                            template.set_option(section, "built", "yes")
                            template.set_option(section, "image_id",
                                                image_id)
                            template.set_option(section, "last_updated",
                                                str(datetime.utcnow()) +
                                                " UTC")
                            # set other instances too
                            if multi_instance:
                                set_instances(template, section, 'yes',
                                              image_id)
                            status = (True, "Pulled " + image_name)
                            self.logger.info(str(status))
                        else:
                            template.set_option(section, "built", "failed")
                            template.set_option(section, "last_updated",
                                                str(datetime.utcnow()) +
                                                " UTC")
                            # set other instances too
                            # fix: was a NameError ('multi_instace'), which
                            # crashed this failure path instead of marking
                            # the instances as failed
                            if multi_instance:
                                set_instances(template, section, 'failed')
                            status = (False, "Failed to pull image " +
                                      str(output.split('\n')[-1]))
                            self.logger.warning(str(status))
                        pull = True
                    except Exception as e:  # pragma: no cover
                        self.logger.warning("Failed to pull image, going to"
                                            " build instead: " + str(e))
                if not pull:
                    # see if additional tags needed for images tagged at
                    # HEAD
                    commit_tag = ""
                    image_name = image_name.replace('@', '-')
                    if image_name.endswith('HEAD'):
                        commit_id = template.option(section, "commit_id")
                        if commit_id[0]:
                            commit_tag = (" -t " + image_name[:-4] +
                                          str(commit_id[1]))
                    # see if additional file arg needed for building
                    # multiple images from same directory
                    file_tag = " ."
                    multi_tool = template.option(section, 'multi_tool')
                    if multi_tool[0] and multi_tool[1] == 'yes':
                        specific_file = template.option(section, 'name')[1]
                        if specific_file == 'unspecified':
                            file_tag = " -f Dockerfile ."
                        else:
                            file_tag = (" -f Dockerfile." + specific_file +
                                        " .")
                    # update image name with new version for update
                    image_name = (image_name.rsplit(':', 1)[0] + ':' +
                                  self.version)
                    output = check_output(shlex.split("docker build --label"
                                                      " vent --label"
                                                      " vent.section=" +
                                                      section + " --label"
                                                      " vent.repo=" +
                                                      repo[1] + " --label"
                                                      " vent.type=" +
                                                      t_type[1] + " --label"
                                                      " vent.name=" +
                                                      name[1] + " --label"
                                                      " vent.groups=" +
                                                      groups[1] + " -t " +
                                                      image_name +
                                                      commit_tag +
                                                      file_tag),
                                          stderr=STDOUT,
                                          close_fds=True)
                    self.logger.info("Building " + name[1] + "\n" +
                                     str(output))
                    image_id = ""
                    for line in output.split("\n"):
                        suc_str = "Successfully built "
                        if line.startswith(suc_str):
                            image_id = line.split(suc_str)[1].strip()
                    template.set_option(section, "built", "yes")
                    template.set_option(section, "image_id", image_id)
                    template.set_option(section, "last_updated",
                                        str(datetime.utcnow()) + " UTC")
                    # set other instances too
                    if multi_instance:
                        set_instances(template, section, 'yes', image_id)
            except Exception as e:  # pragma: no cover
                self.logger.error("unable to build image: " +
                                  str(image_name) + " because: " + str(e))
                template.set_option(section, "built", "failed")
                template.set_option(section, "last_updated",
                                    str(datetime.utcnow()) + " UTC")
                if multi_instance:
                    set_instances(template, section, 'failed')
            chdir(cwd)
        else:
            template.set_option(section, "built", "no")
            template.set_option(section, "last_updated",
                                str(datetime.utcnow()) + " UTC")
            if multi_instance:
                set_instances(template, section, 'no')
        template.set_option(section, 'running', 'no')
        return template

    def list_tools(self):
        """ Return list of tuples of all tools """
        tools = []
        template = Template(template=self.manifest)
        exists, sections = template.sections()
        if exists:
            for section in sections:
                options = {'section': section,
                           'enabled': None,
                           'built': None,
                           'version': None,
                           'repo': None,
                           'branch': None,
                           'name': None,
                           'groups': None,
                           'image_name': None}
                for option in options.keys():
                    exists, value = template.option(section, option)
                    if exists:
                        options[option] = value
                tools.append(options)
        return tools

    def remove(self, name=None, repo=None, namespace=None, branch="master",
               groups=None, enabled="yes", version="HEAD", built="yes"):
        """
        Remove tool (name) or repository, repository is the url. If no
        arguments are specified, all tools will be removed for the
        defaults.
        """
        # initialize
        args = locals()
        # want to remove things from manifest regardless of if built
        del args['built']
        status = (True, None)

        # get resulting dict of sections with options that match constraints
        results, template = self.p_helper.constraint_options(args, [])
        for result in results:
            response, image_name = template.option(result, 'image_name')
            name = template.option(result, 'name')[1]
            try:
                settings_dict = json.loads(
                    template.option(result, 'settings')[1])
                instances = int(settings_dict['instances'])
            except Exception:
                instances = 1

            try:
                # check for container and remove
                c_name = image_name.replace(':', '-').replace('/', '-')
                for i in range(1, instances + 1):
                    # first instance keeps the plain name; extras get a
                    # numeric suffix
                    container_name = c_name + str(i) if i != 1 else c_name
                    container = self.d_client.containers.get(container_name)
                    response = container.remove(v=True, force=True)
                    self.logger.info(response)
                    self.logger.info("Removing plugin container: " +
                                     container_name)
            except Exception as e:  # pragma: no cover
                self.logger.warn("Unable to remove the plugin container: " +
                                 container_name + " because: " + str(e))

            # check for image and remove
            try:
                response = None
                image_id = template.option(result, 'image_id')[1]
                response = self.d_client.images.remove(image_id, force=True)
                self.logger.info(response)
                self.logger.info("Removing plugin image: " + image_name)
            except Exception as e:  # pragma: no cover
                self.logger.warn("Unable to remove the plugin image: " +
                                 image_name + " because: " + str(e))

            # remove tool from the manifest
            for i in range(1, instances + 1):
                res = result.rsplit(':', 2)
                res[0] += str(i) if i != 1 else ''
                res = ':'.join(res)
                if template.section(res)[0]:
                    status = template.del_section(res)
                    self.logger.info("Removing plugin tool: " + res)
        # TODO if all tools from a repo have been removed, remove the repo
        template.write_config()
        return status

    def update(self, name=None, repo=None, namespace=None, branch=None,
               groups=None):
        """
        Update tool (name) or repository, repository is the url. If no
        arguments are specified, all tools will be updated
        """
        # initialize
        args = locals()
        status = (False, None)
        options = ['branch', 'groups', 'image_name']

        # get resulting dict of sections with options that match constraints
        results, template = self.p_helper.constraint_options(args, options)
        for result in results:
            # check for container and remove
            try:
                # fix: results is keyed by section name (see versions/state
                # below); index the section's options, not the results dict
                container_name = results[result]['image_name'] \
                    .replace(':', '-').replace('/', '-')
                container = self.d_client.containers.get(container_name)
                container.remove(v=True, force=True)
            except Exception as e:  # pragma: no cover
                self.logger.info("Error updating: " + str(result) +
                                 " because: " + str(e))

            # TODO git pull
            # TODO build
            # TODO docker pull
            # TODO update tool in the manifest
            self.logger.info("Updating plugin tool: " + result)
        template.write_config()
        return status

    # !! TODO name or group ?
def versions(self, name, namespace=None, branch="master"): """ Return available versions of a tool """ # initialize args = locals() versions = [] options = ['version', 'previous_versions'] # get resulting dict of sections with options that match constraints results, _ = self.p_helper.constraint_options(args, options) for result in results: version_list = [results[result]['version']] if 'previous_versions' in results[result]: version_list += (results[result]['previous_versions']) \ .split(',') versions.append((result, version_list)) return versions # !! TODO name or group ? def current_version(self, name, namespace=None, branch="master"): """ Return current version for a given tool """ # initialize args = locals() versions = [] options = ['version'] # get resulting dict of sections with options that match constraints results, _ = self.p_helper.constraint_options(args, options) for result in results: versions.append((result, results[result]['version'])) return versions # !! TODO name or group ? def state(self, name, namespace=None, branch="master"): """ Return state of a tool, disabled/enabled for each version """ # initialize args = locals() states = [] options = ['enabled'] # get resulting dict of sections with options that match constraints results, _ = self.p_helper.constraint_options(args, options) for result in results: if results[result]['enabled'] == 'yes': states.append((result, 'enabled')) else: states.append((result, 'disabled')) return states # !! TODO name or group ? def enable(self, name, namespace=None, branch="master", version="HEAD"): """ Enable tool at a specific version, default to head """ # initialize args = locals() status = (False, None) # get resulting dict of sections with options that match constraints results, template = self.p_helper.constraint_options(args, []) for result in results: status = template.set_option(result, 'enabled', 'yes') template.write_config() return status # !! TODO name or group ? 
def disable(self, name, namespace=None, branch="master", version="HEAD"):
    """ Disable tool at a specific version, default to head.

    Returns the status tuple of the last manifest write, or
    (False, None) if no section matched the constraints.
    """
    # initialize; locals() captures only the caller's constraints here
    args = locals()
    status = (False, None)
    # get resulting dict of sections with options that match constraints
    results, template = self.p_helper.constraint_options(args, [])
    for result in results:
        status = template.set_option(result, 'enabled', 'no')
    template.write_config()
    return status

def auto_install(self):
    """ Automatically detects images and installs them in the manifest
    if they are not there already.

    Scans docker images labeled 'vent' and, for any whose
    'vent.section' label is not yet a manifest section, clones the
    source repo (if needed) and records name/path/version/build
    metadata in the plugin manifest.  Returns (True, [new sections])
    on success, or the failing status tuple from a clone.
    """
    template = Template(template=self.manifest)
    sections = template.sections()
    images = self.d_client.images.list(filters={'label': 'vent'})
    add_sections = []
    status = (True, None)
    for image in images:
        # NOTE(review): the guard tests 'Labels' in image.attrs but then
        # reads image.attrs['Config']['Labels'] — confirm the docker API
        # version in use exposes a top-level 'Labels' key on image
        # attrs, otherwise this condition may never (or wrongly) pass.
        if ('Labels' in image.attrs and
                'vent.section' in image.attrs['Config']['Labels'] and
                not image.attrs['Config']['Labels']['vent.section'] in sections[1]):
            section = image.attrs['Config']['Labels']['vent.section']
            # section format: org:repo:tool-path:branch:version
            section_str = image.attrs['Config']['Labels'][
                'vent.section'].split(":")
            template.add_section(section)
            if 'vent.name' in image.attrs['Config']['Labels']:
                template.set_option(
                    section, 'name',
                    image.attrs['Config']['Labels']['vent.name'])
            if 'vent.repo' in image.attrs['Config']['Labels']:
                template.set_option(
                    section, 'repo',
                    image.attrs['Config']['Labels']['vent.repo'])
            git_path = join(self.path_dirs.plugins_dir,
                            "/".join(section_str[:2]))
            if not isdir(git_path):
                # clone it down
                status = self.p_helper.clone(
                    image.attrs['Config']['Labels']['vent.repo'])
            template.set_option(section, 'path',
                                join(git_path, section_str[-3][1:]))
            # get template settings
            # TODO account for template files not named vent.template
            v_template = Template(template=join(
                git_path, section_str[-3][1:], 'vent.template'))
            tool_sections = v_template.sections()
            if tool_sections[0]:
                # copy each vent.template section into the manifest as a
                # JSON-encoded option
                for s in tool_sections[1]:
                    section_dict = {}
                    options = v_template.options(s)
                    if options[0]:
                        for option in options[1]:
                            option_name = option
                            if option == 'name':
                                # get link name
                                template.set_option(
                                    section, "link_name",
                                    v_template.option(s, option)[1])
                                option_name = 'link_name'
                            opt_val = v_template.option(s, option)[1]
                            section_dict[option_name] = opt_val
                    if section_dict:
                        template.set_option(section, s,
                                            json.dumps(section_dict))
            if ('vent.type' in image.attrs['Config']['Labels'] and
                    image.attrs['Config']['Labels']['vent.type'] == 'repository'):
                # record full repository-tool metadata for the manifest
                template.set_option(section, 'namespace',
                                    "/".join(section_str[:2]))
                template.set_option(section, 'enabled', 'yes')
                template.set_option(section, 'branch', section_str[-2])
                template.set_option(section, 'version', section_str[-1])
                template.set_option(section, 'last_updated',
                                    str(datetime.utcnow()) + " UTC")
                template.set_option(section, 'image_name',
                                    image.attrs['RepoTags'][0])
                template.set_option(section, 'type', 'repository')
                if 'vent.groups' in image.attrs['Config']['Labels']:
                    template.set_option(
                        section, 'groups',
                        image.attrs['Config']['Labels']['vent.groups'])
                template.set_option(section, 'built', 'yes')
                template.set_option(section, 'image_id',
                                    image.attrs['Id'].split(":")[1][:12])
                template.set_option(section, 'running', 'no')
                # check if image is running as a container
                containers = self.d_client.containers.list(
                    filters={'label': 'vent'})
                for container in containers:
                    if container.attrs['Image'] == image.attrs['Id']:
                        template.set_option(section, 'running', 'yes')
            add_sections.append(section)
            template.write_config()
    if status[0]:
        status = (True, add_sections)
    return status
def __init__(self, **kargs):
    """Bind this object to a shared Action facade.

    The Action owns the plugin registry and the plugin helper; keep
    direct references to both so callers can reach them without going
    through the facade each time.
    """
    action = Action(**kargs)
    self.api_action = action
    self.plugin = action.plugin
    self.p_helper = action.p_helper
    self.logger = Logger(__name__)
def file_queue(path, template_path='/vent/', r_host='redis'):
    """
    Processes files that have been added from the rq-worker, starts plugins
    that match the mime type for the new file.

    path: queue job payload of the form '<prefix>_<file path>'; the text
          before the first underscore is discarded.
    template_path: directory holding vent.cfg and plugin_manifest.cfg
                   (overridden to /root/.vent/ when both files exist there).
    r_host: redis hostname used for the GPU job queue.
    Returns (True, [images started]) or (False, <error or failed images>).
    """
    # imports are local because this function runs inside an rq worker
    import configparser
    import ast
    import docker
    import json
    import requests
    import os
    import sys
    import time
    import uuid
    from redis import Redis
    from rq import Queue
    from subprocess import check_output, Popen, PIPE
    from string import punctuation
    from vent.helpers.logs import Logger
    status = (True, None)
    images = []
    configs = {}
    logger = Logger(__name__)
    if (os.path.isfile('/root/.vent/vent.cfg') and
            os.path.isfile('/root/.vent/plugin_manifest.cfg')):
        template_path = '/root/.vent/'
    try:
        d_client = docker.from_env()
        # get the correct path for binding
        vent_config = configparser.ConfigParser(interpolation=None)
        vent_config.optionxform = str
        vent_config.read(template_path+'vent.cfg')
        if (vent_config.has_section('main') and
                vent_config.has_option('main', 'files')):
            files = vent_config.get('main', 'files')
        else:
            files = '/'
        # deal with ~
        files = os.path.expanduser(files)
        chars = set(punctuation)
        chars.discard('/')
        chars.discard('_')
        chars.discard('-')
        file_name = ''
        # escape any funky symbols to allow users FREEDOM of directory name
        for char in files:
            if char in chars:
                if char == '\\':
                    file_name += '\\' + char
                else:
                    file_name += '\\\\' + char
            else:
                file_name += char
        files = file_name
        # strip the queue-job prefix and rebase /files onto the real
        # host directory configured in vent.cfg
        _, path = path.split('_', 1)
        directory = path.rsplit('/', 1)[0]
        path = path.replace('/files', files, 1)
        path_copy = path
        # read in configuration of plugins to get the ones that should run
        # against the path.
        # keep track of images that failed getting configurations for
        failed_images = set()
        config = configparser.ConfigParser(interpolation=None)
        config.optionxform = str
        print('Path to manifest: ' + template_path+'plugin_manifest.cfg')
        config.read(template_path+'plugin_manifest.cfg')
        sections = config.sections()
        name_maps = {}
        orig_path_d = {}
        path_cmd = {}
        labels_d = {}
        # get all name maps (link name -> container-safe image name)
        for section in sections:
            link_name = config.get(section, 'link_name')
            image_name = config.get(section, 'image_name')
            name_maps[link_name] = image_name.replace(
                ':', '-').replace('/', '-')
        for section in sections:
            path = path_copy
            orig_path = ''
            repo = config.get(section, 'repo')
            t_type = config.get(section, 'type')
            labels = {'vent-plugin': '',
                      'file': path,
                      'vent.section': section,
                      'vent.repo': repo,
                      'vent.type': t_type}
            image_name = config.get(section, 'image_name')
            link_name = config.get(section, 'link_name')
            # doesn't matter if it's a repository or registry because both in manifest
            if config.has_option(section, 'groups'):
                if 'replay' in config.get(section, 'groups'):
                    try:
                        # read the vent.cfg file to grab the network-mapping
                        # specified. For replay_pcap
                        n_name = 'network-mapping'
                        n_map = []
                        if vent_config.has_section(n_name):
                            # make sure that the options aren't empty
                            if vent_config.options(n_name):
                                options = vent_config.options(n_name)
                                for option in options:
                                    if vent_config.get(n_name, option):
                                        n_map.append(vent_config.get(
                                            n_name, option))
                        # prepend the first mapped interface to the path;
                        # keep the original path for volume binding below
                        orig_path = path
                        path = str(n_map[0]) + ' ' + path
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'service'):
                try:
                    options_dict = json.loads(config.get(section, 'service'))
                    for option in options_dict:
                        value = options_dict[option]
                        labels[option] = value
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if config.has_option(section, 'settings'):
                try:
                    options_dict = json.loads(config.get(section, 'settings'))
                    in_base = directory == '/files'
                    # process base by default
                    process_file = in_base
                    # check if this tool shouldn't process the base by default
                    if 'process_base' in options_dict:
                        if options_dict['process_base'] == 'no':
                            process_file = False
                    # check if this tool should look at subdirs created by
                    # other tools' output
                    if 'process_from_tool' in options_dict and not in_base:
                        for tool in options_dict['process_from_tool'].split(','):
                            dir_pieces = directory.split('/')
                            dir_check = dir_pieces
                            for dir_piece in dir_pieces:
                                if 'UTC' in dir_piece:
                                    dir_check = dir_piece
                            if tool.replace(' ', '-') in dir_check:
                                process_file = True
                    if 'ext_types' in options_dict and process_file:
                        ext_types = options_dict['ext_types'].split(',')
                        for ext_type in ext_types:
                            if path.endswith(ext_type):
                                # extension matched: this image will run
                                images.append(image_name)
                                configs[image_name] = {}
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
            if image_name in configs:
                if config.has_option(section, 'docker'):
                    try:
                        options_dict = ast.literal_eval(
                            config.get(section, 'docker'))
                        for option in options_dict:
                            try:
                                configs[image_name][option] = ast.literal_eval(
                                    options_dict[option])
                            except Exception as e:  # pragma: no cover
                                # value wasn't a python literal; keep as str
                                configs[image_name][option] = options_dict[option]
                        if 'links' in configs[image_name]:
                            for link in configs[image_name]['links']:
                                if link in name_maps:
                                    configs[image_name]['links'][name_maps[link]
                                                                 ] = configs[image_name]['links'].pop(link)
                        # TODO network_mode
                        # TODO volumes_from
                        # TODO external services
                    except Exception as e:  # pragma: no cover
                        failed_images.add(image_name)
                        status = (False, str(e))
            if config.has_option(section, 'gpu') and image_name in configs:
                try:
                    options_dict = json.loads(config.get(section, 'gpu'))
                    if 'enabled' in options_dict:
                        enabled = options_dict['enabled']
                        if enabled == 'yes':
                            configs[image_name]['gpu_options'] = options_dict
                            labels['vent.gpu'] = 'yes'
                            if 'dedicated' in options_dict:
                                labels['vent.gpu.dedicated'] = options_dict['dedicated']
                            if 'device' in options_dict:
                                labels['vent.gpu.device'] = options_dict['device']
                            if 'mem_mb' in options_dict:
                                labels['vent.gpu.mem_mb'] = options_dict['mem_mb']
                            port = ''
                            host = ''
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option('nvidia-docker-plugin', 'port')):
                                port = vent_config.get(
                                    'nvidia-docker-plugin', 'port')
                            else:
                                port = '3476'
                            if (vent_config.has_section('nvidia-docker-plugin') and
                                    vent_config.has_option('nvidia-docker-plugin', 'host')):
                                host = vent_config.get(
                                    'nvidia-docker-plugin', 'host')
                            else:
                                # grab the default gateway
                                try:
                                    route = Popen(('/sbin/ip', 'route'),
                                                  stdout=PIPE)
                                    host = check_output(('awk', '/default/ {print$3}'),
                                                        stdin=route.stdout).strip().decode('utf-8')
                                    route.wait()
                                except Exception as e:  # pragma no cover
                                    logger.error('Default gateway '
                                                 'went wrong ' + str(e))
                            # ask nvidia-docker-plugin for the volume/device
                            # flags needed to run a GPU container
                            nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli'
                            params = {'vol': 'nvidia_driver'}
                            try:
                                r = requests.get(nd_url, params=params)
                                if r.status_code == 200:
                                    options = r.text.split()
                                    for option in options:
                                        if option.startswith('--volume-driver='):
                                            configs[image_name]['volume_driver'] = option.split('=', 1)[
                                                1]
                                        elif option.startswith('--volume='):
                                            vol = option.split('=', 1)[
                                                1].split(':')
                                            if 'volumes' in configs[image_name]:
                                                # !! TODO handle if volumes is a list
                                                configs[image_name]['volumes'][vol[0]] = {'bind': vol[1],
                                                                                          'mode': vol[2]}
                                            else:
                                                configs[image_name]['volumes'] = {vol[0]: {'bind': vol[1],
                                                                                           'mode': vol[2]}}
                                        elif option.startswith('--device='):
                                            dev = option.split('=', 1)[1]
                                            if 'devices' in configs[image_name]:
                                                configs[image_name]['devices'].append(dev +
                                                                                      ':' +
                                                                                      dev +
                                                                                      ':rwm')
                                            else:
                                                configs[image_name]['devices'] = [
                                                    dev + ':' + dev + ':rwm']
                                        else:
                                            # unable to parse option provided by
                                            # nvidia-docker-plugin
                                            pass
                            except Exception as e:  # pragma: no cover
                                failed_images.add(image_name)
                                status = (False, str(e))
                                print('Failure with nvidia-docker-plugin: ' + str(e))
                except Exception as e:  # pragma: no cover
                    failed_images.add(image_name)
                    status = (False, str(e))
                    print('Unable to process gpu options: ' + str(e))
            # remember per-image command path, original path, and labels
            path_cmd[image_name] = path
            orig_path_d[image_name] = orig_path
            labels_d[image_name] = labels
        # TODO get syslog address rather than hardcode
        # TODO add group label
        # TODO get group and name for syslog tag
        log_config = {'type': 'syslog',
                      'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                 'syslog-facility': 'daemon',
                                 'tag': '{{.Name}}'}}
        # setup gpu queue
        can_queue_gpu = True
        try:
            q = Queue(connection=Redis(host=r_host), default_timeout=86400)
        except Exception as e:  # pragma: no cover
            can_queue_gpu = False
            print('Unable to connect to redis: ' + str(e))
        # start containers
        for image in images:
            if image not in failed_images:
                orig_path = orig_path_d[image]
                labels = labels_d[image]
                configs[image]['remove'] = True
                # unique container name: image + timestamp + short uuid
                name = image.replace('/', '-').replace(':', '-') + '_' + \
                    str(int(time.time()))+'_'+str(uuid.uuid4())[:4]
                if orig_path:
                    # replay_pcap is special so we can't bind it like normal
                    # since the plugin takes in an additional argument
                    dir_path = orig_path.rsplit('/', 1)[0]
                else:
                    dir_path = path.rsplit('/', 1)[0]
                volumes = {dir_path: {'bind': dir_path, 'mode': 'rw'}}
                if 'volumes' in configs[image]:
                    for volume in volumes:
                        configs[image]['volumes'][volume] = volumes[volume]
                else:
                    configs[image]['volumes'] = volumes
                command = path_cmd[image]
                if 'command' in configs[image]:
                    command = configs[image]['command'] + ' ' + command
                    del configs[image]['command']
                if 'vent.gpu' in labels and labels['vent.gpu'] == 'yes':
                    if can_queue_gpu:
                        # queue up containers requiring a gpu
                        q_str = json.dumps({'image': image,
                                            'command': command,
                                            'labels': labels,
                                            'detach': True,
                                            'name': name,
                                            'log_config': log_config,
                                            'configs': configs[image]})
                        q.enqueue('watch.gpu_queue', q_str, ttl=2592000)
                    else:
                        failed_images.add(image)
                else:
                    if 'gpu_options' in configs[image]:
                        del configs[image]['gpu_options']
                    d_client.containers.run(image=image,
                                            command=command,
                                            labels=labels,
                                            detach=True,
                                            name=name,
                                            log_config=log_config,
                                            **configs[image])
        if failed_images:
            status = (False, failed_images)
        else:
            status = (True, images)
    except Exception as e:  # pragma: no cover
        status = (False, str(e))
        print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        print('Failed to process job: ' + str(e))
    print(str(status))
    return status
class ToolForm(npyscreen.ActionForm):
    """ Tools form for the Vent CLI.

    Presents checkboxes for the tools an action applies to, supports
    toggling between group views, and runs the selected action in a
    background thread with a progress popup.
    """

    def __init__(self, *args, **keywords):
        """ Initialize tool form objects.

        Expected keywords: 'names' (list of Action method names to bind
        as action_object1..N), 'action_dict' (extra action metadata),
        plus the standard npyscreen form keywords.
        """
        self.logger = Logger(__name__)
        self.logger.info(str(keywords['names']))
        self.api_action = Action()
        self.m_helper = MenuHelper()
        action = {'api_action': self.api_action}
        self.tools_tc = {}
        self.repo_widgets = {}
        if keywords['action_dict']:
            action.update(keywords['action_dict'])
        if keywords['names']:
            i = 1
            for name in keywords['names']:
                action['action_object' + str(i)] = getattr(
                    self.api_action, name)
                i += 1
        self.action = action
        # get list of all possible group views to display
        self.views = deque()
        possible_groups = set()
        manifest = Template(self.api_action.plugin.manifest)
        if self.action['cores']:
            tools = self.api_action.inventory(choices=['core'])[1]['core']
        else:
            tools = self.api_action.inventory(choices=['tools'])[1]['tools']
        for tool in tools:
            groups = manifest.option(tool, 'groups')[1].split(',')
            for group in groups:
                # don't do core because that's the purpose of all in views
                if group != '' and group != 'core':
                    possible_groups.add(group)
        self.manifest = manifest
        self.views += possible_groups
        self.views.append('all groups')
        # actions for which multi-instance tools share one image, so
        # numbered instances are hidden from the list
        self.no_instance = ['build', 'remove']
        super(ToolForm, self).__init__(*args, **keywords)

    def quit(self, *args, **kwargs):
        """ Overridden to switch back to MAIN form """
        self.parentApp.switchForm('MAIN')

    def toggle_view(self, *args, **kwargs):
        """ Toggles the view between different groups """
        group_to_display = self.views.popleft()
        self.cur_view.value = group_to_display
        for repo in self.tools_tc:
            for tool in self.tools_tc[repo]:
                t_groups = self.manifest.option(tool, 'groups')[1]
                if group_to_display not in t_groups and \
                        group_to_display != 'all groups':
                    self.tools_tc[repo][tool].value = False
                    self.tools_tc[repo][tool].hidden = True
                else:
                    self.tools_tc[repo][tool].value = True
                    self.tools_tc[repo][tool].hidden = False
        # redraw elements
        self.display()
        # add view back to queue
        self.views.append(group_to_display)

    def create(self, group_view=False):
        """ Update with current tools """
        self.add_handlers({"^T": self.quit, "^Q": self.quit})
        self.add(npyscreen.TitleText,
                 name='Select which tools to ' + self.action['action'] + ':',
                 editable=False)
        togglable = ['remove', 'enable', 'disable', 'build']
        if self.action['action_name'] in togglable:
            self.cur_view = self.add(npyscreen.TitleText,
                                     name='Group view:',
                                     value='all groups',
                                     editable=False,
                                     rely=3)
            self.add_handlers({"^V": self.toggle_view})
            i = 5
        else:
            i = 4
        if self.action['action_name'] == 'start':
            response = self.action['api_action'].inventory(choices=[
                'repos', 'tools', 'built', 'enabled', 'running', 'core'
            ])
        else:
            response = self.action['api_action'].inventory(
                choices=['core', 'repos', 'tools'])
        if response[0]:
            inventory = response[1]
            repos = inventory['repos']
            # dict has repo as key and list of core/non-core tools as values
            has_core = {}
            has_non_core = {}
            # find all tools that are in this repo
            # and list them if they are core
            for repo in repos:
                core_list = []
                ncore_list = []
                # splice the repo names for processing
                if (repo.startswith("http")):
                    repo_name = repo.rsplit("/", 2)[1:]
                else:
                    repo_name = repo.split("/")
                # determine if enabled or disabled tools should be shown
                show_disabled = False
                if 'action_name' in self.action:
                    if self.action['action_name'] == 'enable':
                        show_disabled = True
                for tool in inventory['tools']:
                    tool_repo_name = tool.split(":")
                    # cross reference repo names
                    if (repo_name[0] == tool_repo_name[0] and
                            repo_name[1] == tool_repo_name[1]):
                        # check to ensure tool not set to locally active = no
                        # in vent.cfg
                        externally_active = False
                        vent_cfg_file = self.action['api_action'].vent_config
                        vent_cfg = Template(vent_cfg_file)
                        tool_pairs = vent_cfg.section('external-services')[1]
                        for ext_tool in tool_pairs:
                            if ext_tool[0].lower() == inventory['tools'][tool]:
                                try:
                                    ext_tool_options = json.loads(ext_tool[1])
                                    loc = 'locally_active'
                                    if (loc in ext_tool_options and
                                            ext_tool_options[loc] == 'no'):
                                        externally_active = True
                                except Exception as e:
                                    self.logger.error("Couldn't check ext"
                                                      " because: " + str(e))
                                    externally_active = False
                        # check to ensure not disabled
                        disabled = False
                        manifest = Template(self.api_action.plugin.manifest)
                        if manifest.option(tool, 'enabled')[1] == 'no':
                            disabled = True
                        if (not externally_active and not disabled and
                                not show_disabled):
                            instance_num = re.search(
                                r'\d+$', manifest.option(tool, 'name')[1])
                            if not instance_num:
                                ncore_list.append(tool)
                            # multiple instances share same image
                            elif self.action[
                                    'action_name'] not in self.no_instance:
                                ncore_list.append(tool)
                        elif (not externally_active and disabled and
                                show_disabled):
                            instance_num = re.search(
                                r'\d+$', manifest.option(tool, 'name')[1])
                            if not instance_num:
                                ncore_list.append(tool)
                            # multiple instances share same image
                            elif self.action[
                                    'action_name'] not in self.no_instance:
                                ncore_list.append(tool)
                for tool in inventory['core']:
                    tool_repo_name = tool.split(":")
                    # cross reference repo names
                    if (repo_name[0] == tool_repo_name[0] and
                            repo_name[1] == tool_repo_name[1]):
                        # check to ensure tool not set to locally active = no
                        # in vent.cfg
                        externally_active = False
                        vent_cfg_file = self.action['api_action'].vent_config
                        vent_cfg = Template(vent_cfg_file)
                        tool_pairs = vent_cfg.section('external-services')[1]
                        for ext_tool in tool_pairs:
                            if ext_tool[0].lower() == inventory['core'][tool]:
                                try:
                                    ext_tool_options = json.loads(ext_tool[1])
                                    loc = 'locally_active'
                                    if (loc in ext_tool_options and
                                            ext_tool_options[loc] == 'no'):
                                        externally_active = True
                                except Exception as e:
                                    self.logger.error("Couldn't check ext"
                                                      " because: " + str(e))
                                    externally_active = False
                        # check to ensure not disabled
                        disabled = False
                        manifest = Template(self.api_action.plugin.manifest)
                        if manifest.option(tool, 'enabled')[1] == 'no':
                            disabled = True
                        if (not externally_active and not disabled and
                                not show_disabled):
                            instance_num = re.search(
                                r'\d+$', manifest.option(tool, 'name')[1])
                            if not instance_num:
                                core_list.append(tool)
                            # multiple instances share same image
                            elif self.action[
                                    'action_name'] not in self.no_instance:
                                core_list.append(tool)
                        elif (not externally_active and disabled and
                                show_disabled):
                            instance_num = re.search(
                                r'\d+$', manifest.option(tool, 'name')[1])
                            if not instance_num:
                                core_list.append(tool)
                            # multiple instances share same image
                            elif self.action[
                                    'action_name'] not in self.no_instance:
                                core_list.append(tool)
                has_core[repo] = core_list
                has_non_core[repo] = ncore_list
            for repo in repos:
                self.tools_tc[repo] = {}
                if self.action['cores']:
                    # make sure only repos with core tools are displayed
                    if has_core.get(repo):
                        self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                           name='Plugin: ' + repo,
                                                           editable=False,
                                                           rely=i,
                                                           relx=5)
                        for tool in has_core[repo]:
                            tool_name = tool.split(":", 2)[2].split("/")[-1]
                            if tool_name == "":
                                tool_name = "/"
                            self.tools_tc[repo][tool] = self.add(
                                npyscreen.CheckBox,
                                name=tool_name,
                                value=True,
                                relx=10)
                            i += 1
                        i += 3
                else:
                    # make sure only repos with non-core tools are displayed
                    if has_non_core.get(repo):
                        self.repo_widgets[repo] = self.add(npyscreen.TitleText,
                                                           name='Plugin: ' + repo,
                                                           editable=False,
                                                           rely=i,
                                                           relx=5)
                        for tool in has_non_core[repo]:
                            tool_name = tool.split(":", 2)[2].split("/")[-1]
                            if tool_name == "":
                                tool_name = "/"
                            self.tools_tc[repo][tool] = self.add(
                                npyscreen.CheckBox,
                                name=tool_name,
                                value=True,
                                relx=10)
                            i += 1
                        i += 3
        return

    def on_ok(self):
        """ Take the tool selections and perform the provided action on them """
        def diff(first, second):
            """
            Get the elements that exist in the first list and not in the second
            """
            second = set(second)
            return [item for item in first if item not in second]

        def popup(original, orig_type, thr, title):
            """
            Start the thread and display a popup of info
            until the thread is finished
            """
            thr.start()
            info_str = ""
            while thr.is_alive():
                if orig_type == 'containers':
                    info = diff(Containers(), original)
                elif orig_type == 'images':
                    info = diff(Images(), original)
                if info:
                    info_str = ""
                    for entry in info:
                        info_str = entry[0] + ": " + entry[1] + "\n" + info_str
                if self.action['action_name'] != 'configure':
                    npyscreen.notify_wait(info_str, title=title)
                    time.sleep(1)
            thr.join()
            try:
                result = self.api_action.queue.get(False)
                if isinstance(result, tuple) and isinstance(result[1], tuple):
                    running, failed = result[1]
                    r_str = ''
                    for container in running:
                        r_str += container + ": successful\n"
                    for container in failed:
                        r_str += container + ": failed\n"
                    npyscreen.notify_confirm(r_str)
            except Exception as e:  # pragma: no cover
                # best-effort: no result queued means nothing to report
                pass
            return

        if self.action['type'] == 'images':
            originals = Images()
        else:
            originals = Containers()
        tool_d = {}
        if self.action['action_name'] in ['clean', 'remove', 'stop', 'update']:
            # destructive actions require explicit confirmation
            reconfirmation_str = ""
            if self.action['cores']:
                reconfirmation_str = "Are you sure you want to "
                reconfirmation_str += self.action['action_name']
                reconfirmation_str += " core containers?"
            else:
                reconfirmation_str = "Are you sure you want to "
                reconfirmation_str += self.action['action_name']
                reconfirmation_str += " plugin containers?"
            perform = npyscreen.notify_ok_cancel(reconfirmation_str,
                                                 title="Confirm command")
            if not perform:
                return
        tools_to_configure = []
        for repo in self.tools_tc:
            for tool in self.tools_tc[repo]:
                self.logger.info(tool)
                if self.tools_tc[repo][tool].value:
                    t = tool.split(":", 2)[2].split("/")[-1]
                    if t.startswith('/:'):
                        t = " " + t[1:]
                    t = t.split(":")
                    if self.action['action_name'] == 'start':
                        status = self.action['action_object2'](name=t[0],
                                                               branch=t[1],
                                                               version=t[2])
                        if status[0]:
                            tool_d.update(status[1])
                    elif self.action['action_name'] == 'configure':
                        constraints = {
                            'name': t[0],
                            'branch': t[1],
                            'version': t[2],
                            'repo': repo
                        }
                        options = ['type']
                        action = self.action['api_action']
                        tool = action.p_helper.constraint_options(
                            constraints, options)[0]
                        # only one tool should be returned
                        # fix: dict views are not subscriptable in Python 3,
                        # so materialize before indexing
                        name = list(tool.keys())[0]
                        if tool[name]['type'] == 'registry':
                            registry_image = True
                        else:
                            registry_image = False
                        kargs = {
                            'name': 'Configure ' + t[0],
                            'tool_name': t[0],
                            'branch': t[1],
                            'version': t[2],
                            'repo': repo,
                            'next_tool': None,
                            'get_configure': action.get_configure,
                            'save_configure': action.save_configure,
                            'restart_tools': action.restart_tools,
                            'clean': action.clean,
                            'prep_start': action.prep_start,
                            'start_tools': action.start,
                            'from_registry': registry_image
                        }
                        if tools_to_configure:
                            kargs['next_tool'] = tools_to_configure[-1]
                        self.parentApp.addForm("EDITOR" + t[0],
                                               EditorForm,
                                               **kargs)
                        tools_to_configure.append("EDITOR" + t[0])
                    else:
                        kargs = {'name': t[0],
                                 'branch': t[1],
                                 'version': t[2]}
                        # add core recognition
                        if self.action['cores']:
                            kargs.update({'groups': 'core'})
                        # use latest version for update, not necessarily
                        # version in manifest
                        if self.action['action_name'] == 'update':
                            if t[2] != 'HEAD':
                                repo_commits = self.m_helper.repo_commits(
                                    repo)[1]
                                for branch in repo_commits:
                                    if branch[0] == t[1]:
                                        kargs.update(
                                            {'new_version': branch[1][0]})
                            else:
                                kargs.update({'new_version': 'HEAD'})
                        thr = Thread(target=self.action['action_object1'],
                                     args=(),
                                     kwargs=kargs)
                        popup(originals,
                              self.action['type'],
                              thr,
                              'Please wait, ' + self.action['present_t'] + '...')
        if self.action['action_name'] == 'start':
            thr = Thread(target=self.action['action_object1'],
                         args=(),
                         kwargs={'tool_d': tool_d})
            popup(originals, self.action['type'], thr,
                  'Please wait, ' + self.action['present_t'] + '...')
        if self.action['action_name'] != 'configure':
            npyscreen.notify_confirm('Done ' + self.action['present_t'] + '.',
                                     title=self.action['past_t'])
            self.quit()
        else:
            if len(tools_to_configure) > 0:
                self.parentApp.change_form(tools_to_configure[-1])
            else:
                npyscreen.notify_confirm("No tools selected, returning to"
                                         " main menu",
                                         title="No action taken")
                self.quit()

    def on_cancel(self):
        """ When user clicks cancel, will return to MAIN """
        self.quit()
class Repository: def __init__(self, manifest, *args, **kwargs): self.path_dirs = PathDirs(**kwargs) self.manifest = manifest self.d_client = docker.from_env() self.logger = Logger(__name__) def add(self, repo, tools=None, overrides=None, version='HEAD', core=False, image_name=None, branch='master', build=True, user=None, pw=None): status = (True, None) self.repo = repo.lower() self.tools = tools self.overrides = overrides self.branch = branch self.version = version self.image_name = image_name self.core = core status = self._clone(user=user, pw=pw) if status[0] and build: status = self._build() return status def _build(self): status = (True, None) status = self._get_tools() matches = status[1] status = self.path_dirs.apply_path(self.repo) original_path = status[1] if status[0] and len(matches) > 0: repo, org, name = self.path_dirs.get_path(self.repo) cmd = 'git rev-parse --short ' + self.version commit_id = '' try: commit_id = check_output( shlex.split(cmd), stderr=STDOUT, close_fds=True).strip().decode('utf-8') except Exception as e: # pragma: no cover self.logger.error( 'Unable to get commit ID because: {0}'.format(str(e))) template = Template(template=self.manifest) for match in matches: status, template, match_path, image_name, section = self._build_manifest( match, template, repo, org, name, commit_id) if not status[0]: break status, template = self._build_image(template, match_path, image_name, section) if not status[0]: break if status[0]: # write out configuration to the manifest file template.write_config() chdir(original_path) return status def _get_tools(self): status = (True, None) matches = [] path, _, _ = self.path_dirs.get_path(self.repo) status = Checkout(path, branch=self.branch, version=self.version) if status[0]: if self.tools is None and self.overrides is None: # get all tools matches = AvailableTools(path, branch=self.branch, version=self.version, core=self.core) elif self.tools is None: # there's only something in overrides # grab all the 
tools then apply overrides matches = AvailableTools(path, branch=self.branch, version=self.version, core=self.core) # !! TODO apply overrides to matches elif self.overrides is None: # there's only something in tools # only grab the tools specified matches = ToolMatches(tools=self.tools, version=self.version) else: # both tools and overrides were specified # grab only the tools specified, with the overrides applied o_matches = ToolMatches(tools=self.tools, version=self.version) matches = o_matches for override in self.overrides: override_t = None if override[0] == '.': override_t = ('', override[1]) else: override_t = override for match in o_matches: if override_t[0] == match[0]: matches.remove(match) matches.append(override_t) status = (True, matches) return status def _build_manifest(self, match, template, repo, org, name, commit_id): status = (True, None) # keep track of whether or not to write an additional manifest # entry for multiple instances, and how many additional entries # to write addtl_entries = 1 # remove @ in match for template setting purposes if match[0].find('@') >= 0: true_name = match[0].split('@')[1] else: true_name = match[0] # TODO check for special settings here first for the specific match self.version = match[1] section = org + ':' + name + ':' + true_name + ':' section += self.branch + ':' + self.version # need to get rid of temp identifiers for tools in same repo match_path = repo + match[0].split('@')[0] if self.image_name: image_name = self.image_name elif not self.core: image_name = org + '/' + name if match[0] != '': # if tool is in a subdir, add that to the name of the # image image_name += '-' + '-'.join(match[0].split('/')[1:]) image_name += ':' + self.branch else: image_name = ('cyberreboot/vent-' + match[0].split('/')[-1] + ':' + self.branch) image_name = image_name.replace('_', '-') # check if the section already exists is_there, options = template.section(section) previous_commit = None previous_commits = None head = False if 
is_there: for option in options: # TODO check if tool name but a different version # exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # check if tool comes from multi directory multi_tool = 'no' if match[0].find('@') >= 0: multi_tool = 'yes' # !! TODO # check if section should be removed from config i.e. all tools # but new commit removed one that was in a previous commit image_name = image_name.lower() image_name = image_name.replace('@', '-') # special case for vent images if image_name.startswith('cyberreboot/vent'): image_name = image_name.replace('vent-vent-core-', 'vent-') image_name = image_name.replace('vent-vent-extras-', 'vent-') # set template section & options for tool at version and branch template.add_section(section) template.set_option(section, 'name', true_name.split('/')[-1]) template.set_option(section, 'namespace', org + '/' + name) template.set_option(section, 'path', match_path) template.set_option(section, 'repo', self.repo) template.set_option(section, 'multi_tool', multi_tool) template.set_option(section, 'branch', self.branch) template.set_option(section, 'version', self.version) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') template.set_option(section, 'image_name', image_name) template.set_option(section, 'type', 'repository') # save settings in vent.template to plugin_manifest # watch for multiple tools in same directory # just wanted to store match path with @ for path for use in # other actions tool_template = 'vent.template' if match[0].find('@') >= 0: tool_template = match[0].split('@')[1] + '.template' vent_template_path = join(match_path, tool_template) if exists(vent_template_path): with open(vent_template_path, 'r') as f: vent_template_val 
= f.read() else: vent_template_val = '' settings_dict = ParsedSections(vent_template_val) for setting in settings_dict: template.set_option(section, setting, json.dumps(settings_dict[setting])) # TODO do we need this if we save as a dictionary? vent_template = Template(vent_template_path) vent_status, response = vent_template.option('info', 'name') instances = vent_template.option('settings', 'instances') if instances[0]: addtl_entries = int(instances[1]) if vent_status: template.set_option(section, 'link_name', response) else: template.set_option(section, 'link_name', true_name.split('/')[-1]) if self.version == 'HEAD': template.set_option(section, 'commit_id', commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if (previous_commits and previous_commit not in previous_commits): previous_commits = (previous_commit + ',' + previous_commits) elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, 'previous_versions', previous_commits) groups = vent_template.option('info', 'groups') if groups[0]: template.set_option(section, 'groups', groups[1]) # set groups to empty string if no groups defined for tool else: template.set_option(section, 'groups', '') # write additional entries for multiple instances if addtl_entries > 1: # add 2 for naming conventions for i in range(2, addtl_entries + 1): addtl_section = section.rsplit(':', 2) addtl_section[0] += str(i) addtl_section = ':'.join(addtl_section) template.add_section(addtl_section) orig_vals = template.section(section)[1] for val in orig_vals: template.set_option(addtl_section, val[0], val[1]) template.set_option(addtl_section, 'name', true_name.split('/')[-1] + str(i)) return status, template, match_path, image_name, section def _build_image(self, template, match_path, image_name, section, build_local=False): 
status = (True, None) output = '' def set_instances(template, section, built, image_id=None): """ Set build information for multiple instances """ i = 2 while True: addtl_section = section.rsplit(':', 2) addtl_section[0] += str(i) addtl_section = ':'.join(addtl_section) if template.section(addtl_section)[0]: template.set_option(addtl_section, 'built', built) if image_id: template.set_option(addtl_section, 'image_id', image_id) template.set_option(addtl_section, 'last_updated', Timestamp()) else: break i += 1 # determine whether a tool should be considered a multi instance multi_instance = False try: settings = template.option(section, 'settings') if settings[0]: settings_dict = json.loads(settings[1]) if 'instances' in settings_dict and int( settings_dict['instances']) > 1: multi_instance = True except Exception as e: # pragma: no cover self.logger.error( 'Failed to check for multi instance because: {0}'.format( str(e))) status = (False, str(e)) cwd = getcwd() chdir(match_path) try: name = template.option(section, 'name') groups = template.option(section, 'groups') t_type = template.option(section, 'type') path = template.option(section, 'path') status, config_override = self.path_dirs.override_config(path[1]) if groups[1] == '' or not groups[0]: groups = (True, 'none') if not name[0]: name = (True, image_name) pull = False image_exists = False cfg_template = Template(template=self.path_dirs.cfg_file) use_existing_image = False result = cfg_template.option('build-options', 'use_existing_images') if result[0]: use_existing_image = result[1] if use_existing_image == 'yes' and not config_override: try: self.d_client.images.get(image_name) i_attrs = self.d_client.images.get(image_name).attrs image_id = i_attrs['Id'].split(':')[1][:12] template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: 
set_instances(template, section, 'yes', image_id) status = (True, 'Found {0}'.format(image_name)) self.logger.info(str(status)) image_exists = True except docker.errors.ImageNotFound: image_exists = False except Exception as e: # pragma: no cover self.logger.warning( 'Failed to query Docker for images because: {0}'. format(str(e))) if not image_exists: # pull if '/' in image_name, fallback to build if '/' in image_name and not build_local and not config_override: try: image = self.d_client.images.pull(image_name) i_attrs = self.d_client.images.get(image_name).attrs image_id = i_attrs['Id'].split(':')[1][:12] if image_id: template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option( section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'yes', image_id) status = (True, 'Pulled {0}'.format(image_name)) self.logger.info(str(status)) else: template.set_option(section, 'built', 'failed') template.set_option( section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'failed') status = (False, 'Failed to pull image {0}'.format( str(output.split('\n')[-1]))) self.logger.warning(str(status)) pull = True except Exception as e: # pragma: no cover self.logger.warning( 'Failed to pull image, going to build instead: {0}' .format(str(e))) status = (False, 'Failed to pull image because: {0}'.format( str(e))) if not pull and not image_exists: # get username to label built image with username = getpass.getuser() # see if additional file arg needed for building multiple # images from same directory file_tag = 'Dockerfile' multi_tool = template.option(section, 'multi_tool') if multi_tool[0] and multi_tool[1] == 'yes': specific_file = template.option(section, 'name')[1] if specific_file != 'unspecified': file_tag = 'Dockerfile.' 
+ specific_file # update image name with new version for update image_name = image_name.rsplit(':', 1)[0] + ':' + self.branch labels = {} labels['vent'] = '' labels['vent.section'] = section labels['vent.repo'] = self.repo labels['vent.type'] = t_type[1] labels['vent.name'] = name[1] labels['vent.groups'] = groups[1] labels['built-by'] = username image = self.d_client.images.build(path='.', dockerfile=file_tag, tag=image_name, labels=labels, rm=True) image_id = image[0].id.split(':')[1][:12] template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'yes', image_id) status = (True, 'Built {0}'.format(image_name)) except Exception as e: # pragma: no cover self.logger.error( 'Unable to build image {0} because: {1} | {2}'.format( str(image_name), str(e), str(output))) template.set_option(section, 'built', 'failed') template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') if multi_instance: set_instances(template, section, 'failed') status = (False, 'Failed to build image because: {0}'.format(str(e))) chdir(cwd) template.set_option(section, 'running', 'no') return status, template def _clone(self, user=None, pw=None): status = (True, None) try: # if path already exists, ignore try: path, _, _ = self.path_dirs.get_path(self.repo) chdir(path) return status except Exception as e: # pragma: no cover self.logger.debug("Repo doesn't exist, attempting to clone.") status = self.path_dirs.apply_path(self.repo) if not status[0]: self.logger.error('Unable to clone because: {0}'.format( str(status[1]))) return status repo = self.repo # check if user and pw were supplied, typically for private repos if user and pw: # only https is supported when using user/pw auth_repo = 'https://' + user + ':' + pw + '@' repo = auth_repo + repo.split('https://')[-1] # clone repo 
check_output(shlex.split( 'env GIT_SSL_NO_VERIFY=true git clone --recursive ' + repo + ' .'), stderr=STDOUT, close_fds=True).decode('utf-8') chdir(status[1]) status = (True, 'Successfully cloned: {0}'.format(self.repo)) except Exception as e: # pragma: no cover e_str = str(e) # scrub username and password from error message if e_str.find('@') >= 0: e_str = e_str[:e_str.find('//') + 2] + \ e_str[e_str.find('@') + 1:] self.logger.error('Clone failed with error: {0}'.format(e_str)) status = (False, e_str) return status def update(self, repo, tools=None): # TODO return