def Dependencies(tools):
    """
    Take in a list of tools that are being updated and return any running
    tools that depend on linking to them
    """
    dependencies = []
    if tools:
        path_dirs = PathDirs()
        man = Template(os.path.join(path_dirs.meta_dir,
                                    'plugin_manifest.cfg'))
        for section in man.sections()[1]:
            # don't worry about dealing with tool if it's not running
            running = man.option(section, 'running')
            if not running[0] or running[1] != 'yes':
                continue
            t_name = man.option(section, 'name')[1]
            t_branch = man.option(section, 'branch')[1]
            t_version = man.option(section, 'version')[1]
            t_identifier = {'name': t_name,
                            'branch': t_branch,
                            'version': t_version}
            options = man.options(section)[1]
            if 'docker' in options:
                d_settings = json.loads(man.option(section, 'docker')[1])
                if 'links' in d_settings:
                    for link in json.loads(d_settings['links']):
                        if link in tools:
                            dependencies.append(t_identifier)
    return dependencies
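

# Illustrative usage sketch (not part of the original module): Dependencies()
# only reads plugin_manifest.cfg, so it can be called read-only after deciding
# which tools to update. The link names below are hypothetical.
def _example_dependencies():
    updated = ['rq_worker', 'file_drop']  # hypothetical link names being updated
    dependents = Dependencies(updated)
    # each entry identifies a running tool whose docker 'links' reference one
    # of the updated tools
    for dep in dependents:
        print(dep['name'], dep['branch'], dep['version'])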
def Core(branch="master", **kargs):
    """
    Get the normal core tools, and the currently installed/built/running
    ones, including custom core services
    """
    # !! TODO this might need to store namespaces/branches/versions
    core = {'built': [], 'running': [], 'installed': [], 'normal': []}

    # get normal core tools
    plugins = Plugin(plugins_dir=".internals/plugins")
    status, cwd = plugins.clone('https://github.com/cyberreboot/vent')
    if status:
        plugins.version = 'HEAD'
        plugins.branch = branch
        response = plugins.checkout()
        matches = plugins._available_tools(groups='core')
        for match in matches:
            core['normal'].append(match[0].split('/')[-1])
    else:
        core['normal'] = 'failed'

    # get core tools that have been installed
    path_dirs = PathDirs(**kargs)
    manifest = os.path.join(path_dirs.meta_dir, "plugin_manifest.cfg")
    template = Template(template=manifest)
    tools = template.sections()
    if tools[0]:
        for tool in tools[1]:
            groups = template.option(tool, "groups")
            if groups[0] and "core" in groups[1]:
                name = template.option(tool, "name")
                if name[0]:
                    core['installed'].append(name[1])

    # get core tools that have been built and/or are running
    try:
        d_client = docker.from_env()
        images = d_client.images.list()
        for image in images:
            try:
                if ("vent.groups" in image.attrs['Labels'] and
                        'core' in image.attrs['Labels']['vent.groups']):
                    if 'vent.name' in image.attrs['Labels']:
                        core['built'].append(
                            image.attrs['Labels']['vent.name'])
            except Exception as err:  # pragma: no cover
                pass
        containers = d_client.containers.list()
        for container in containers:
            try:
                if ("vent.groups" in container.attrs['Config']['Labels'] and
                        'core' in container.attrs['Config']['Labels']['vent.groups']):
                    if 'vent.name' in container.attrs['Config']['Labels']:
                        core['running'].append(
                            container.attrs['Config']['Labels']['vent.name'])
            except Exception as err:  # pragma: no cover
                pass
    except Exception as e:  # pragma: no cover
        pass

    return core
def DropLocation():
    """ Get the directory that file drop is watching """
    template = Template(template=PathDirs().cfg_file)
    drop_loc = template.option("main", "files")[1]
    drop_loc = os.path.expanduser(drop_loc)
    drop_loc = os.path.abspath(drop_loc)
    return (True, drop_loc)
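

# Illustrative usage sketch: DropLocation() always returns a (True, path)
# tuple, mirroring the (status, value) convention used by Template.option(),
# so callers typically unpack both parts.
def _example_drop_location():
    status, drop_loc = DropLocation()
    if status:
        print('file drop is watching: ' + drop_loc)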
def list_tools(self):
    """ Return a list of dicts describing all tools in the manifest """
    tools = []
    template = Template(template=self.manifest)
    exists, sections = template.sections()
    if exists:
        for section in sections:
            options = {'section': section,
                       'enabled': None,
                       'built': None,
                       'version': None,
                       'repo': None,
                       'branch': None,
                       'name': None,
                       'groups': None,
                       'image_name': None}
            for option in options.keys():
                exists, value = template.option(section, option)
                if exists:
                    options[option] = value
            tools.append(options)
    return tools
def start_priority_containers(self, groups, group_orders, tool_d):
    """ Select containers based on priorities to start """
    vent_cfg = Template(self.path_dirs.cfg_file)
    cfg_groups = vent_cfg.option('groups', 'start_order')
    if cfg_groups[0]:
        cfg_groups = cfg_groups[1].split(',')
    else:
        cfg_groups = []
    all_groups = sorted(set(groups))
    s_conts = []
    f_conts = []
    # start tools in order of group defined in vent.cfg
    for group in cfg_groups:
        # remove from all_groups because already checked out
        if group in all_groups:
            all_groups.remove(group)
        if group in group_orders:
            for cont_t in sorted(group_orders[group]):
                if cont_t[1] not in s_conts:
                    s_conts, f_conts = self.start_containers(
                        cont_t[1], tool_d, s_conts, f_conts)
    # start tools that haven't been specified in the vent.cfg, if any
    for group in all_groups:
        if group in group_orders:
            for cont_t in sorted(group_orders[group]):
                if cont_t[1] not in s_conts:
                    s_conts, f_conts = self.start_containers(
                        cont_t[1], tool_d, s_conts, f_conts)
    return (s_conts, f_conts)
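

# Illustrative sketch of the vent.cfg stanza that start_priority_containers()
# consumes; the group names are hypothetical, the only requirement implied by
# the code is a comma-separated 'start_order' value under [groups]:
#
#   [groups]
#   start_order = core,network,files
def _example_start_order_parsing():
    # simulate what the option lookup returns for the stanza above
    cfg_groups = (True, 'core,network,files')
    order = cfg_groups[1].split(',') if cfg_groups[0] else []
    assert order == ['core', 'network', 'files']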
def toggle_view(self, *args, **kwargs):
    """ Toggle the tool display over to the next group view """
    group = self.views.popleft()
    new_display = []
    new_display.append('Tools for group ' + group + ' found:')
    manifest = Template(self.api_action.plugin.manifest)
    cur_repo = ''
    for i in range(1, len(self.all_tools) - 1):
        val = self.all_tools[i]
        # get repo val
        if val.startswith("  Plugin:"):
            new_display.append(val)
            cur_repo = val.split(':', 1)[1].strip()
        # determine if tool should be displayed in this group; tool name
        # lines are indented one level deeper than the 'Plugin:' lines but
        # less than their detail lines
        elif val.startswith("    ") and not val.startswith("      "):
            name = val.strip()
            constraints = {"repo": cur_repo, "name": name}
            t_section = self.api_action.p_helper \
                            .constraint_options(constraints, [])[0]
            # dict keys are not subscriptable in Python 3, so take the first
            # key from a list of the keys
            t_section = list(t_section.keys())[0]
            if group in manifest.option(t_section, 'groups')[1].split(','):
                new_display += self.all_tools[i:i+5]
        elif val == '':
            new_display.append(val)
    # if all groups display all groups
    if group == 'all groups':
        self.display_val.values = self.all_tools
    else:
        self.display_val.values = new_display
    # redraw
    self.display()
    # add group back into cycle
    self.views.append(group)
def _available_tools(self, groups=None):
    """
    Return list of possible tools in repo for the given version and branch
    """
    matches = []
    if not hasattr(self, 'path'):
        return matches
    if groups:
        groups = groups.split(",")
    for root, dirnames, filenames in os.walk(self.path):
        for filename in fnmatch.filter(filenames, 'Dockerfile'):
            # !! TODO deal with wild/etc.?
            if groups:
                try:
                    template = Template(
                        template=os.path.join(root, 'vent.template'))
                    for group in groups:
                        template_groups = template.option("info", "groups")
                        if template_groups[0] and group in template_groups[1]:
                            matches.append(
                                (root.split(self.path)[1], self.version))
                except Exception as e:  # pragma: no cover
                    pass
            else:
                matches.append((root.split(self.path)[1], self.version))
    return matches
def host_config(self):
    """ Ensure the host configuration file exists """
    default_file_dir = "/tmp/vent_files"
    config = Template(template=os.path.join(self.base_dir, "vent.cfg"))
    resp = config.section("main")
    if resp[0]:
        resp = config.option("main", "files")
        if not resp[0]:
            config.add_option("main", "files", default_file_dir)
            self.ensure_dir(default_file_dir)
    else:
        config.add_option("main", "files", default_file_dir)
        self.ensure_dir(default_file_dir)
    config.write_config()
    return
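

# Illustrative usage sketch of the state host_config() guarantees: after the
# call, vent.cfg always has a [main] section with a 'files' option, defaulting
# to /tmp/vent_files when the operator has not set one. PathDirs is assumed to
# be the class this method is bound to, as in the other helpers here.
def _example_host_config():
    path_dirs = PathDirs()
    path_dirs.host_config()
    exists, files_dir = Template(template=path_dirs.cfg_file).option('main',
                                                                     'files')
    # exists is True because host_config() just ensured the option is present
    print(exists, files_dir)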
def available_tools(self, path, version="HEAD", groups=None):
    """
    Return list of possible tools in repo for the given version and branch
    """
    matches = []
    if groups:
        groups = groups.split(",")
    for root, _, filenames in walk(path):
        files = fnmatch.filter(filenames, 'Dockerfile*')
        # append additional identifiers to tools if multiple in same
        # directory
        add_info = len(files) > 1
        for f in files:
            # !! TODO deal with wild/etc.?
            addtl_info = ''
            if add_info:
                # @ will be delimiter symbol for multi-tools
                try:
                    addtl_info = '@' + f.split('.')[1]
                except Exception as e:
                    addtl_info = '@unspecified'
            if groups:
                if add_info and not addtl_info == '@unspecified':
                    tool_template = addtl_info.split('@')[1] + '.template'
                else:
                    tool_template = 'vent.template'
                try:
                    template = Template(template=join(root, tool_template))
                    for group in groups:
                        template_groups = template.option("info", "groups")
                        if (template_groups[0] and
                                group in template_groups[1]):
                            matches.append(
                                (root.split(path)[1] + addtl_info, version))
                except Exception as e:  # pragma: no cover
                    self.logger.info("error: " + str(e))
            else:
                matches.append((root.split(path)[1] + addtl_info, version))
    return matches
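

# Illustrative usage sketch: with a hypothetical repo checkout containing both
# Dockerfile.rabbitmq and Dockerfile.redis in one directory, available_tools()
# appends the '@' delimiter described above, so matches would look like
# [('/path/in/repo@rabbitmq', 'HEAD'), ('/path/in/repo@redis', 'HEAD')].
# The helper argument is assumed to be the object this method is bound to.
def _example_available_tools(helper):
    matches = helper.available_tools('/tmp/some_repo_clone', version='HEAD',
                                     groups='core')
    for rel_path, version in matches:
        print(rel_path, version)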
def __init__(self, action=None, logger=None, *args, **keywords):
    """ Initialize inventory form objects """
    self.action = action
    self.logger = logger
    self.api_action = self.action['api_action']
    # get list of all possible group views to display
    self.views = deque()
    possible_groups = set()
    manifest = Template(self.api_action.plugin.manifest)
    if self.action['cores']:
        tools = self.api_action.inventory(choices=['core'])[1]['core']
    else:
        tools = self.api_action.inventory(choices=['tools'])[1]['tools']
    for tool in tools:
        groups = manifest.option(tool, 'groups')[1].split(',')
        for group in groups:
            # don't do core because that's the purpose of all in views
            if group != '' and group != 'core':
                possible_groups.add(group)
    self.views += possible_groups
    self.views.append('all groups')
    super(InventoryForm, self).__init__(*args, **keywords)
def tools(self):
    """ Return a list of dicts describing all tools in the manifest """
    tools = []
    template = Template(template=self.manifest)
    exists, sections = template.sections()
    if exists:
        for section in sections:
            options = {'section': section,
                       'enabled': None,
                       'built': None,
                       'version': None,
                       'repo': None,
                       'branch': None,
                       'name': None,
                       'groups': None,
                       'image_name': None}
            for option in options.keys():
                exists, value = template.option(section, option)
                if exists:
                    options[option] = value
            tools.append(options)
    return tools
def _available_tools(self, groups=None):
    """
    Return list of possible tools in repo for the given version and branch
    """
    matches = []
    if not hasattr(self, 'path'):
        return matches
    if groups:
        groups = groups.split(",")
    for root, dirnames, filenames in os.walk(self.path):
        for filename in fnmatch.filter(filenames, 'Dockerfile'):
            # !! TODO deal with wild/etc.?
            if groups:
                try:
                    template = Template(template=os.path.join(root,
                                                              'vent.template'))
                    for group in groups:
                        template_groups = template.option("info", "groups")
                        if template_groups[0] and group in template_groups[1]:
                            matches.append((root.split(self.path)[1],
                                            self.version))
                except Exception as e:  # pragma: no cover
                    pass
            else:
                matches.append((root.split(self.path)[1], self.version))
    return matches
def __init__(self, *args, **keywords):
    """ Initialize tool form objects """
    self.logger = Logger(__name__)
    self.logger.info(str(keywords['names']))
    self.api_action = Action()
    self.m_helper = MenuHelper()
    action = {'api_action': self.api_action}
    self.tools_tc = {}
    self.repo_widgets = {}
    if keywords['action_dict']:
        action.update(keywords['action_dict'])
    if keywords['names']:
        i = 1
        for name in keywords['names']:
            action['action_object' + str(i)] = getattr(self.api_action, name)
            i += 1
    self.action = action
    # get list of all possible group views to display
    self.views = deque()
    possible_groups = set()
    manifest = Template(self.api_action.plugin.manifest)
    if self.action['cores']:
        tools = self.api_action.inventory(choices=['core'])[1]['core']
    else:
        tools = self.api_action.inventory(choices=['tools'])[1]['tools']
    for tool in tools:
        groups = manifest.option(tool, 'groups')[1].split(',')
        for group in groups:
            # don't do core because that's the purpose of all in views
            if group != '' and group != 'core':
                possible_groups.add(group)
    self.manifest = manifest
    self.views += possible_groups
    self.views.append('all groups')
    self.no_instance = ['build', 'remove']
    super(ToolForm, self).__init__(*args, **keywords)
def GpuUsage(**kargs):
    """ Get the current GPU usage of available GPUs """
    usage = (False, None)
    gpu_status = {'vent_usage': {'dedicated': [], 'mem_mb': {}}}
    path_dirs = PathDirs(**kargs)
    path_dirs.host_config()
    template = Template(template=path_dirs.cfg_file)

    # get running jobs using gpus
    try:
        d_client = docker.from_env()
        c = d_client.containers.list(all=False,
                                     filters={'label': 'vent-plugin'})
        for container in c:
            if ('vent.gpu' in container.attrs['Config']['Labels'] and
                    container.attrs['Config']['Labels']['vent.gpu'] == 'yes'):
                device = container.attrs['Config']['Labels']['vent.gpu.device']
                if ('vent.gpu.dedicated' in container.attrs['Config']['Labels'] and
                        container.attrs['Config']['Labels']['vent.gpu.dedicated'] == 'yes'):
                    gpu_status['vent_usage']['dedicated'].append(device)
                elif 'vent.gpu.mem_mb' in container.attrs['Config']['Labels']:
                    if device not in gpu_status['vent_usage']['mem_mb']:
                        gpu_status['vent_usage']['mem_mb'][device] = 0
                    gpu_status['vent_usage']['mem_mb'][device] += int(
                        container.attrs['Config']['Labels']['vent.gpu.mem_mb'])
    except Exception as e:  # pragma: no cover
        pass

    port = '3476'
    # default docker gateway
    host = '172.17.0.1'
    result = template.option('nvidia-docker-plugin', 'port')
    if result[0]:
        port = result[1]
    result = template.option('nvidia-docker-plugin', 'host')
    if result[0]:
        host = result[1]
    else:
        try:
            # now just requires ip, ifconfig
            route = check_output(('ip', 'route')).split('\n')
            default = ''
            # grab the default network device.
            for device in route:
                if 'default' in device:
                    default = device.split()[4]
                    break
            # grab the IP address for the default device
            ip_addr = check_output(('ifconfig', default))
            ip_addr = ip_addr.split('\n')[1].split()[1]
            host = ip_addr
        except Exception as e:  # pragma: no cover
            pass

    # have to get the info separately to determine how much memory is available
    nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/info/json'
    try:
        r = requests.get(nd_url)
        if r.status_code == 200:
            status = r.json()
            for i, device in enumerate(status['Devices']):
                gm = int(round(math.log(int(device['Memory']['Global']), 2)))
                gpu_status[i] = {'global_memory': 2**gm,
                                 'cores': device['Cores']}
        else:
            usage = (False, "Unable to get GPU usage request error code: " +
                     str(r.status_code))
    except Exception as e:  # pragma: no cover
        usage = (False, "Error: " + str(e))

    # get actual status of each gpu
    nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/status/json'
    try:
        r = requests.get(nd_url)
        if r.status_code == 200:
            status = r.json()
            for i, device in enumerate(status['Devices']):
                if i not in gpu_status:
                    gpu_status[i] = {}
                gpu_status[i]['utilization'] = device['Utilization']
                gpu_status[i]['memory'] = device['Memory']
                gpu_status[i]['processes'] = device['Processes']
            usage = (True, gpu_status)
        else:
            usage = (False, "Unable to get GPU usage request error code: " +
                     str(r.status_code))
    except Exception as e:  # pragma: no cover
        usage = (False, "Error: " + str(e))

    return usage
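

# Illustrative usage sketch: GpuUsage() follows the (status, result)
# convention used throughout these helpers. On success the result maps GPU
# indices to info/status dicts plus a 'vent_usage' summary of what vent
# containers have claimed; on failure the second element is an error string.
def _example_gpu_usage():
    status, result = GpuUsage()
    if status:
        print('dedicated devices:', result['vent_usage']['dedicated'])
        print('memory reserved per device (MB):',
              result['vent_usage']['mem_mb'])
    else:
        print('unable to get GPU usage:', result)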
def prep_start(self, repo=None, name=None, groups=None, enabled='yes', branch='master', version='HEAD'): """ Start a set of tools that match the parameters given, if no parameters are given, start all installed tools on the master branch at verison HEAD that are enabled """ args = locals() self.logger.info('Starting: prep_start') self.logger.info('Arguments: ' + str(args)) status = (False, None) try: options = [ 'name', 'namespace', 'built', 'groups', 'path', 'image_name', 'branch', 'repo', 'type', 'version' ] vent_config = Template(template=self.path_dirs.cfg_file) manifest = Template(self.manifest) files = vent_config.option('main', 'files') files = (files[0], expanduser(files[1])) s, _ = self.constraint_options(args, options) status, tool_d = self.start_sections(s, files, groups, enabled, branch, version) # look out for links to delete because they're defined externally links_to_delete = set() # get instances for each tool tool_instances = {} sections = manifest.sections()[1] for section in sections: settings = manifest.option(section, 'settings') if settings[0]: settings = json.loads(settings[1]) if 'instances' in settings: l_name = manifest.option(section, 'link_name') if l_name[0]: tool_instances[l_name[1]] = int( settings['instances']) # check and update links, volumes_from, network_mode for container in list(tool_d.keys()): if 'labels' not in tool_d[ container] or 'vent.groups' not in tool_d[container][ 'labels'] or 'core' not in tool_d[container][ 'labels']['vent.groups']: tool_d[container]['remove'] = True if 'links' in tool_d[container]: for link in list(tool_d[container]['links'].keys()): # add links to external services already running if # necessary, by default configure local services too configure_local = True ext = 'external-services' if link in vent_config.options(ext)[1]: try: lconf = json.loads( vent_config.option(ext, link)[1]) if ('locally_active' not in lconf or lconf['locally_active'] == 'no'): ip_adr = lconf['ip_address'] port = lconf['port'] tool_d[container]['extra_hosts'] = {} # containers use lowercase names for # connections tool_d[container]['extra_hosts'][ link.lower()] = ip_adr # create an environment variable for container # to access port later env_variable = link.upper() + \ '_CUSTOM_PORT=' + port if 'environment' not in tool_d[container]: tool_d[container]['environment'] = [] tool_d[container]['environment'].append( env_variable) # remove the entry from links because no # longer connecting to local container links_to_delete.add(link) configure_local = False except Exception as e: # pragma: no cover self.logger.error("couldn't load external" ' settings because: ' + str(e)) configure_local = True status = False if configure_local: for c in list(tool_d.keys()): if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == link): tool_d[container]['links'][ tool_d[c]['name']] = tool_d[container][ 'links'].pop(link) if link in tool_instances and tool_instances[ link] > 1: for i in range( 2, tool_instances[link] + 1): tool_d[container]['links'][ tool_d[c]['name'] + str(i)] = tool_d[container][ 'links'][tool_d[c] ['name']] + str(i) if 'volumes_from' in tool_d[container]: tmp_volumes_from = tool_d[container]['volumes_from'] tool_d[container]['volumes_from'] = [] for volumes_from in list(tmp_volumes_from): for c in list(tool_d.keys()): if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == volumes_from): tool_d[container]['volumes_from'].append( tool_d[c]['name']) tmp_volumes_from.remove(volumes_from) tool_d[container]['volumes_from'] += tmp_volumes_from if 
'network_mode' in tool_d[container]: if tool_d[container]['network_mode'].startswith( 'container:'): network_c_name = tool_d[container][ 'network_mode'].split('container:')[1] for c in list(tool_d.keys()): if ('tmp_name' in tool_d[c] and tool_d[c]['tmp_name'] == network_c_name): tool_d[container]['network_mode'] = 'container:' + \ tool_d[c]['name'] # remove tmp_names for c in list(tool_d.keys()): if 'tmp_name' in tool_d[c]: del tool_d[c]['tmp_name'] # remove links section if all were externally configured for c in list(tool_d.keys()): if 'links' in tool_d[c]: for link in links_to_delete: if link in tool_d[c]['links']: del tool_d[c]['links'][link] # delete links if no more defined if not tool_d[c]['links']: del tool_d[c]['links'] # remove containers that shouldn't be started for c in list(tool_d.keys()): deleted = False if 'start' in tool_d[c] and not tool_d[c]['start']: del tool_d[c] deleted = True if not deleted: # look for tools services that are being done externally # tools are capitalized in vent.cfg, so make them lowercase # for comparison ext = 'external-services' external_tools = vent_config.section(ext)[1] name = tool_d[c]['labels']['vent.name'] for tool in external_tools: if name == tool[0].lower(): try: tool_config = json.loads(tool[1]) if ('locally_active' in tool_config and tool_config['locally_active'] == 'no'): del tool_d[c] except Exception as e: # pragma: no cover self.logger.warning( 'Locally running container ' + name + ' may be redundant') if status: status = (True, tool_d) else: status = (False, tool_d) except Exception as e: # pragma: no cover self.logger.error('prep_start failed with error: ' + str(e)) status = (False, e) self.logger.info('Status of prep_start: ' + str(status[0])) self.logger.info('Finished: prep_start') return status
def start_sections(self, s, files, groups, enabled, branch, version): """ Run through sections for prep_start """ tool_d = {} status = (True, None) for section in s: # initialize needed vars c_name = s[section]['image_name'].replace(':', '-') c_name = c_name.replace('/', '-') instance_num = re.search(r'\d+$', s[section]['name']) if instance_num: c_name += instance_num.group() image_name = s[section]['image_name'] # checkout the right version and branch of the repo cwd = getcwd() self.logger.info('current directory is: ' + str(cwd)) # images built from registry won't have path if s[section]['path'] != '': chdir(join(s[section]['path'])) # TODO commenting out for now, should use update_repo #status = self.checkout(branch=branch, version=version) status = (True, None) self.logger.info(status) chdir(cwd) tool_d[c_name] = {'image': image_name, 'name': c_name} # get rid of all commented sections in various runtime # configurations manifest = Template(self.manifest) overall_dict = {} for setting in ['info', 'docker', 'gpu', 'settings', 'service']: option = manifest.option(section, setting) if option[0]: overall_dict[setting] = {} settings_dict = json.loads(option[1]) for opt in settings_dict: if not opt.startswith('#'): overall_dict[setting][opt] = \ settings_dict[opt] if 'docker' in overall_dict: options_dict = overall_dict['docker'] for option in options_dict: options = options_dict[option] # check for commands to evaluate if '`' in options: cmds = options.split('`') if len(cmds) > 2: i = 1 while i < len(cmds): try: cmds[i] = check_output( shlex.split(cmds[i]), stderr=STDOUT, close_fds=True).strip().decode('utf-8') except Exception as e: # pragma: no cover self.logger.error( 'unable to evaluate command specified in vent.template: ' + str(e)) i += 2 options = ''.join(cmds) # check for commands to evaluate # store options set for docker try: tool_d[c_name][option] = literal_eval(options) except Exception as e: # pragma: no cover self.logger.info('unable to literal_eval: ' + str(options)) tool_d[c_name][option] = options if 'labels' not in tool_d[c_name]: tool_d[c_name]['labels'] = {} # get the service uri info if 'service' in overall_dict: try: options_dict = overall_dict['service'] for option in options_dict: tool_d[c_name]['labels'][option] = options_dict[option] except Exception as e: # pragma: no cover self.logger.error('unable to store service options for ' 'docker: ' + str(e)) # check for gpu settings if 'gpu' in overall_dict: try: options_dict = json.loads(status[1]) for option in options_dict: tool_d[c_name]['labels']['gpu.' + option] = options_dict[option] except Exception as e: # pragma: no cover self.logger.error('unable to store gpu options for ' 'docker: ' + str(e)) # get temporary name for links, etc. 
plugin_c = Template(template=self.manifest) status, plugin_sections = plugin_c.sections() self.logger.info(status) for plugin_section in plugin_sections: status = plugin_c.option(plugin_section, 'link_name') self.logger.info(status) image_status = plugin_c.option(plugin_section, 'image_name') self.logger.info(image_status) if status[0] and image_status[0]: cont_name = image_status[1].replace(':', '-') cont_name = cont_name.replace('/', '-') if cont_name not in tool_d: tool_d[cont_name] = { 'image': image_status[1], 'name': cont_name, 'start': False } tool_d[cont_name]['tmp_name'] = status[1] # add extra labels tool_d[c_name]['labels']['vent'] = Version() tool_d[c_name]['labels']['vent.namespace'] = s[section][ 'namespace'] tool_d[c_name]['labels']['vent.branch'] = branch tool_d[c_name]['labels']['vent.version'] = version tool_d[c_name]['labels']['vent.name'] = s[section]['name'] tool_d[c_name]['labels']['vent.section'] = section tool_d[c_name]['labels']['vent.repo'] = s[section]['repo'] tool_d[c_name]['labels']['vent.type'] = s[section]['type'] # check for log_config settings in external-services externally_configured = False vent_config = Template(self.path_dirs.cfg_file) for ext_tool in vent_config.section('external-services')[1]: if ext_tool[0].lower() == 'syslog': try: log_dict = json.loads(ext_tool[1]) # configure if not locally active if ('locally_active' not in log_dict or log_dict['locally_active'] == 'no'): del log_dict['locally_active'] log_config = {} log_config['type'] = 'syslog' log_config['config'] = {} ip_address = '' port = '' for option in log_dict: if option == 'ip_address': ip_address = log_dict[option] elif option == 'port': port = log_dict['port'] syslog_address = 'tcp://' + ip_address + ':' + port syslog_config = { 'syslog-address': syslog_address, 'syslog-facility': 'daemon', 'tag': '{{.Name}}' } log_config['config'].update(syslog_config) externally_configured = True except Exception as e: # pragma: no cover self.logger.error('external settings for log_config' " couldn't be stored because: " + str(e)) externally_configured = False if not externally_configured: log_config = { 'type': 'syslog', 'config': { 'syslog-address': 'tcp://0.0.0.0:514', 'syslog-facility': 'daemon', 'tag': '{{.Name}}' } } if 'groups' in s[section]: # add labels for groups tool_d[c_name]['labels']['vent.groups'] = s[section]['groups'] # add restart=always to core containers if 'core' in s[section]['groups']: tool_d[c_name]['restart_policy'] = {'Name': 'always'} # map network names to environment variables if 'network' in s[section]['groups']: vent_config = Template(template=self.path_dirs.cfg_file) nic_mappings = vent_config.section('network-mapping') nics = '' if nic_mappings[0]: for nic in nic_mappings[1]: nics += nic[0] + ':' + nic[1] + ',' nics = nics[:-1] if nics: if 'environment' in tool_d[c_name]: tool_d[c_name]['environment'].append('VENT_NICS=' + nics) else: tool_d[c_name]['environment'] = [ 'VENT_NICS=' + nics ] # send logs to syslog if ('syslog' not in s[section]['groups'] and 'core' in s[section]['groups']): log_config['config']['tag'] = '{{.Name}}' tool_d[c_name]['log_config'] = log_config if 'syslog' not in s[section]['groups']: tool_d[c_name]['log_config'] = log_config # mount necessary directories if 'files' in s[section]['groups']: ulimits = [] ulimits.append( docker.types.Ulimit(name='nofile', soft=1048576, hard=1048576)) tool_d[c_name]['ulimits'] = ulimits # check if running in a docker container if 'VENT_CONTAINERIZED' in environ and environ[ 'VENT_CONTAINERIZED'] == 'true': 
if 'volumes_from' in tool_d[c_name]: tool_d[c_name]['volumes_from'].append( environ['HOSTNAME']) else: tool_d[c_name]['volumes_from'] = [ environ['HOSTNAME'] ] else: if 'volumes' in tool_d[c_name]: tool_d[c_name]['volumes'][ self.path_dirs.base_dir[:-1]] = { 'bind': '/vent', 'mode': 'ro' } else: tool_d[c_name]['volumes'] = { self.path_dirs.base_dir[:-1]: { 'bind': '/vent', 'mode': 'ro' } } if files[0]: if 'volumes' in tool_d[c_name]: tool_d[c_name]['volumes'][files[1]] = { 'bind': '/files', 'mode': 'rw' } else: tool_d[c_name]['volumes'] = { files[1]: { 'bind': '/files', 'mode': 'rw' } } else: tool_d[c_name]['log_config'] = log_config # add label for priority if 'settings' in overall_dict: try: options_dict = overall_dict['settings'] for option in options_dict: if option == 'priority': tool_d[c_name]['labels'][ 'vent.priority'] = options_dict[option] except Exception as e: # pragma: no cover self.logger.error('unable to store settings options ' 'for docker ' + str(e)) # only start tools that have been built if s[section]['built'] != 'yes': del tool_d[c_name] # store section information for adding info to manifest later else: tool_d[c_name]['section'] = section return status, tool_d
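

# Self-contained sketch (illustrative, not from the original file) of the
# backtick evaluation start_sections() performs on docker options: values
# wrapped in ` ` in a vent.template are replaced with the output of that
# command before the option is stored. The option value below is hypothetical.
def _example_backtick_eval():
    import shlex
    from subprocess import STDOUT, check_output
    options = 'host-`hostname`'
    cmds = options.split('`')
    if len(cmds) > 2:
        i = 1
        while i < len(cmds):
            cmds[i] = check_output(shlex.split(cmds[i]),
                                   stderr=STDOUT,
                                   close_fds=True).strip().decode('utf-8')
            i += 2
    print(''.join(cmds))  # e.g. 'host-' followed by the machine hostname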
def test_option():
    """ Test the option function """
    instance = Template()
    instance.option('foo', 'bar')
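

def test_option_tuple():
    """ Illustrative companion to test_option (an assumption, not an original
    test): callers elsewhere unpack the result as
    `exists, value = template.option(...)`, so the return value is expected
    to be a 2-tuple """
    instance = Template()
    result = instance.option('foo', 'bar')
    assert isinstance(result, tuple)
    assert len(result) == 2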
def prep_start(self, repo=None, name=None, groups=None, enabled="yes", branch="master", version="HEAD", run_build=False): """ Start a set of tools that match the parameters given, if no parameters are given, start all installed tools on the master branch at verison HEAD that are enabled """ args = locals() self.logger.info("Starting: prep_start") self.logger.info("Arguments: " + str(args)) status = (True, None) tool_dict = {} try: del args['run_build'] options = [ 'name', 'namespace', 'built', 'groups', 'path', 'image_name', 'branch', 'version' ] vent_config = Template(template=self.vent_config) files = vent_config.option('main', 'files') sections, template = self.plugin.constraint_options(args, options) for section in sections: # initialize needed vars template_path = os.path.join(sections[section]['path'], 'vent.template') container_name = sections[section]['image_name'].replace( ':', '-') container_name = container_name.replace('/', '-') image_name = sections[section]['image_name'] # checkout the right version and branch of the repo self.plugin.branch = branch self.plugin.version = version cwd = os.getcwd() self.logger.info("current directory is: " + str(cwd)) os.chdir(os.path.join(sections[section]['path'])) status = self.plugin.checkout() self.logger.info(status) os.chdir(cwd) if run_build: status = self.build(name=sections[section]['name'], groups=groups, enabled=enabled, branch=branch, version=version) self.logger.info(status) # set docker settings for container vent_template = Template(template_path) status = vent_template.section('docker') self.logger.info(status) tool_dict[container_name] = { 'image': image_name, 'name': container_name } if status[0]: for option in status[1]: options = option[1] # check for commands to evaluate if '`' in options: cmds = options.split('`') # TODO this probably needs better error checking to handle mismatched `` if len(cmds) > 2: i = 1 while i < len(cmds): try: cmds[i] = subprocess.check_output( shlex.split(cmds[i]), stderr=subprocess.STDOUT, close_fds=True).strip() except Exception as e: # pragma: no cover self.logger.error( "unable to evaluate command specified in vent.template: " + str(e)) i += 2 options = "".join(cmds) # store options set for docker try: tool_dict[container_name][ option[0]] = ast.literal_eval(options) except Exception as e: # pragma: no cover self.logger.error( "unable to store the options set for docker: " + str(e)) tool_dict[container_name][option[0]] = options # get temporary name for links, etc. 
status = vent_template.section('info') self.logger.info(status) plugin_config = Template(template=self.plugin.manifest) status, plugin_sections = plugin_config.sections() self.logger.info(status) for plugin_section in plugin_sections: status = plugin_config.option(plugin_section, "link_name") self.logger.info(status) image_status = plugin_config.option( plugin_section, "image_name") self.logger.info(image_status) if status[0] and image_status[0]: cont_name = image_status[1].replace(':', '-') cont_name = cont_name.replace('/', '-') if cont_name not in tool_dict: tool_dict[cont_name] = { 'image': image_status[1], 'name': cont_name, 'start': False } tool_dict[cont_name]['tmp_name'] = status[1] # add extra labels if 'labels' not in tool_dict[container_name]: tool_dict[container_name]['labels'] = {} tool_dict[container_name]['labels']['vent'] = Version() tool_dict[container_name]['labels'][ 'vent.namespace'] = sections[section]['namespace'] tool_dict[container_name]['labels']['vent.branch'] = branch tool_dict[container_name]['labels']['vent.version'] = version tool_dict[container_name]['labels']['vent.name'] = sections[ section]['name'] if 'groups' in sections[section]: # add labels for groups tool_dict[container_name]['labels'][ 'vent.groups'] = sections[section]['groups'] # send logs to syslog if 'syslog' not in sections[section][ 'groups'] and 'core' in sections[section]['groups']: tool_dict[container_name]['log_config'] = { 'type': 'syslog', 'config': { 'syslog-address': 'tcp://0.0.0.0:514', 'syslog-facility': 'daemon', 'tag': 'core' } } if 'syslog' not in sections[section]['groups']: tool_dict[container_name]['log_config'] = { 'type': 'syslog', 'config': { 'syslog-address': 'tcp://0.0.0.0:514', 'syslog-facility': 'daemon', 'tag': 'plugin' } } # mount necessary directories if 'files' in sections[section]['groups']: if 'volumes' in tool_dict[container_name]: tool_dict[container_name]['volumes'][ self.plugin.path_dirs.base_dir[:-1]] = { 'bind': '/vent', 'mode': 'ro' } else: tool_dict[container_name]['volumes'] = { self.plugin.path_dirs.base_dir[:-1]: { 'bind': '/vent', 'mode': 'ro' } } if files[0]: tool_dict[container_name]['volumes'][files[1]] = { 'bind': '/files', 'mode': 'ro' } else: tool_dict[container_name]['log_config'] = { 'type': 'syslog', 'config': { 'syslog-address': 'tcp://0.0.0.0:514', 'syslog-facility': 'daemon', 'tag': 'plugin' } } # add label for priority status = vent_template.section('settings') self.logger.info(status) if status[0]: for option in status[1]: if option[0] == 'priority': tool_dict[container_name]['labels'][ 'vent.priority'] = option[1] # only start tools that have been built if sections[section]['built'] != 'yes': del tool_dict[container_name] # check and update links, volumes_from, network_mode for container in tool_dict.keys(): if 'links' in tool_dict[container]: for link in tool_dict[container]['links']: for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c][ 'tmp_name'] == link: tool_dict[container]['links'][ tool_dict[c]['name']] = tool_dict[ container]['links'].pop(link) if 'volumes_from' in tool_dict[container]: tmp_volumes_from = tool_dict[container]['volumes_from'] tool_dict[container]['volumes_from'] = [] for volumes_from in list(tmp_volumes_from): for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c][ 'tmp_name'] == volumes_from: tool_dict[container]['volumes_from'].append( tool_dict[c]['name']) tmp_volumes_from.remove(volumes_from) tool_dict[container]['volumes_from'] += tmp_volumes_from if 'network_mode' in 
tool_dict[container]: if tool_dict[container]['network_mode'].startswith( 'container:'): network_c_name = tool_dict[container][ 'network_mode'].split('container:')[1] for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c][ 'tmp_name'] == network_c_name: tool_dict[container][ 'network_mode'] = 'container:' + tool_dict[ c]['name'] # remove tmp_names for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c]: del tool_dict[c]['tmp_name'] # remove containers that shouldn't be started for c in tool_dict.keys(): if 'start' in tool_dict[c] and not tool_dict[c]['start']: del tool_dict[c] except Exception as e: self.logger.error("prep_start failed with error: " + str(e)) status = (False, e) status = (True, tool_dict) self.logger.info("Status of prep_start: " + str(status)) self.logger.info("Finished: prep_start") return status
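

# Note in code form (illustrative): the cleanup loops above iterate
# tool_dict.keys() while deleting entries, which raises "dictionary changed
# size during iteration" on Python 3. The newer prep_start() earlier in this
# section snapshots the keys first; a minimal sketch of that pattern:
def _example_safe_delete(tool_dict):
    for c in list(tool_dict.keys()):
        if 'start' in tool_dict[c] and not tool_dict[c]['start']:
            del tool_dict[c]
    return tool_dict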
def _build_manifest(self, matches): """ Builds and writes the manifest for the tools being added """ # !! TODO check for pre-existing that conflict with request and # disable and/or remove image for match in matches: # keep track of whether or not to write an additional manifest # entry for multiple instances, and how many additional entries # to write addtl_entries = 0 # remove the .git for adding repo info to manifest if self.repo.endswith('.git'): self.repo = self.repo[:-4] # remove @ in match for template setting purposes if match[0].find('@') >= 0: true_name = match[0].split('@')[1] else: true_name = match[0] template = Template(template=self.manifest) # TODO check for special settings here first for the specific match self.version = match[1] response = self.p_helper.checkout(branch=self.branch, version=self.version) if response[0]: section = self.org + ":" + self.name + ":" + true_name + ":" section += self.branch + ":" + self.version # need to get rid of temp identifiers for tools in same repo match_path = self.path + match[0].split('@')[0] if not self.core: image_name = self.org + "-" + self.name + "-" if match[0] != '': # if tool is in a subdir, add that to the name of the # image image_name += '-'.join(match[0].split('/')[1:]) + "-" image_name += self.branch + ":" + self.version else: image_name = ('cyberreboot/vent-' + match[0].split('/')[-1] + ':' + self.branch) image_name = image_name.replace('_', '-') # check if the section already exists exists, options = template.section(section) previous_commit = None previous_commits = None head = False if exists: for option in options: # TODO check if tool name but a different version # exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # check if tool comes from multi directory multi_tool = "no" if match[0].find('@') >= 0: multi_tool = "yes" # !! TODO # check if section should be removed from config i.e. 
all tools # but new commit removed one that was in a previous commit image_name = image_name.lower() if image_name.endswith(":head"): image_name = image_name.split(":head")[0] + ":HEAD" # set template section & options for tool at version and branch template.add_section(section) template.set_option(section, "name", true_name.split('/')[-1]) template.set_option(section, "namespace", self.org + '/' + self.name) template.set_option(section, "path", match_path) template.set_option(section, "repo", self.repo) template.set_option(section, "enabled", "yes") template.set_option(section, "multi_tool", multi_tool) template.set_option(section, "branch", self.branch) template.set_option(section, "version", self.version) template.set_option(section, "last_updated", str(datetime.utcnow()) + " UTC") template.set_option(section, "image_name", image_name.replace('@', '-')) template.set_option(section, "type", "repository") # save settings in vent.template to plugin_manifest # watch for multiple tools in same directory # just wanted to store match path with @ for path for use in # other actions tool_template = 'vent.template' if match[0].find('@') >= 0: tool_template = match[0].split('@')[1] + '.template' vent_template_path = join(match_path, tool_template) if os.path.exists(vent_template_path): with open(vent_template_path) as f: vent_template_val = f.read() else: vent_template_val = '' settings_dict = ParsedSections(vent_template_val) for setting in settings_dict: template.set_option(section, setting, json.dumps(settings_dict[setting])) # TODO do we need this if we save as a dictionary? vent_template = Template(vent_template_path) vent_status, response = vent_template.option("info", "name") if vent_status: template.set_option(section, "link_name", response) else: template.set_option(section, "link_name", true_name.split('/')[-1]) commit_id = None if self.version == 'HEAD': # remove @ in multi-tools chdir(match_path) cmd = "git rev-parse --short HEAD" commit_id = check_output(shlex.split(cmd), stderr=STDOUT, close_fds=True).strip() template.set_option(section, "commit_id", commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if (previous_commits and previous_commit not in previous_commits): previous_commits = (previous_commit + ',' + previous_commits) elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, "previous_versions", previous_commits) if self.version_alias: template.set_option(section, "version_alias", self.version_alias) if self.groups: template.set_option(section, "groups", self.groups) else: groups = vent_template.option("info", "groups") if groups[0]: template.set_option(section, "groups", groups[1]) # set groups to empty string if no groups defined for tool else: template.set_option(section, "groups", '') template = self._build_image(template, match_path, image_name, section) # write additional entries for multiple instances if addtl_entries > 0: # add 2 for naming conventions for i in range(2, addtl_entries + 2): addtl_section = section.rsplit(':', 2) addtl_section[0] += str(i) addtl_section = ':'.join(addtl_section) template.add_section(addtl_section) orig_vals = template.section(section)[1] for val in orig_vals: template.set_option(addtl_section, val[0], val[1]) template.set_option(addtl_section, "name", true_name.split('/')[-1] + str(i)) # write out configuration to the manifest 
file template.write_config() # reset to repo directory chdir(self.path) return
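

# Illustrative sketch of the plugin_manifest.cfg section key that
# _build_manifest() writes: org, repo name, tool path inside the repo, branch,
# and version joined by ':'. The concrete values below are hypothetical.
def _example_manifest_section():
    org, name, true_name = 'cyberreboot', 'vent-plugins', '/tcpdump_hex_parser'
    branch, version = 'master', 'HEAD'
    section = ':'.join([org, name, true_name, branch, version])
    # -> 'cyberreboot:vent-plugins:/tcpdump_hex_parser:master:HEAD'
    return section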
def _build_manifest(self, matches): """ Builds and writes the manifest for the tools being added """ # !! TODO check for pre-existing that conflict with request and disable and/or remove image for match in matches: template = Template(template=self.manifest) # !! TODO check for special settings here first for the specific match self.version = match[1] response = self.checkout() if response[0]: section = self.org + ":" + self.name + ":" + match[ 0] + ":" + self.branch + ":" + self.version match_path = self.path + match[0] image_name = self.org + "-" + self.name + "-" if match[0] != '': # if tool is in a subdir, add that to the name of the image image_name += '-'.join(match[0].split('/')[1:]) + "-" image_name += self.branch + ":" + self.version # check if the section already exists exists, options = template.section(section) previous_commit = None previous_commits = None head = False if exists: for option in options: # TODO check if tool name but a different version exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # !! TODO # check if section should be removed from config - i.e. all tools, # but new commit removed one that was in a previous commit # set template section and options for tool at version and branch template.add_section(section) template.set_option(section, "name", match[0].split('/')[-1]) template.set_option(section, "namespace", self.org + '/' + self.name) template.set_option(section, "path", match_path) template.set_option(section, "repo", self.repo) template.set_option(section, "enabled", "yes") template.set_option(section, "branch", self.branch) template.set_option(section, "version", self.version) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") template.set_option(section, "image_name", image_name) vent_template = Template( template=os.path.join(match_path, 'vent.template')) vent_status, response = vent_template.option("info", "name") if vent_status: template.set_option(section, "link_name", response) else: template.set_option(section, "link_name", match[0].split('/')[-1]) commit_id = None if self.version == 'HEAD': os.chdir(match_path) commit_id = subprocess.check_output( shlex.split("git rev-parse --short HEAD"), stderr=subprocess.STDOUT, close_fds=True).strip() template.set_option(section, "commit_id", commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if previous_commits and previous_commit not in previous_commits: previous_commits = previous_commit + ',' + previous_commits elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, "previous_versions", previous_commits) if self.version_alias: template.set_option(section, "version_alias", self.version_alias) if self.groups: template.set_option(section, "groups", self.groups) else: vent_template = os.path.join(match_path, 'vent.template') if os.path.exists(vent_template): v_template = Template(template=vent_template) groups = v_template.option("info", "groups") if groups[0]: template.set_option(section, "groups", groups[1]) template = self._build_image(template, match_path, image_name, section) # write out 
configuration to the manifest file template.write_config() # reset to repo directory os.chdir(self.path) return
def start_containers(self, container, tool_d, s_containers, f_containers): """ Start container that was passed in and return status """ # use section to add info to manifest section = tool_d[container]['section'] del tool_d[container]['section'] manifest = Template(self.manifest) try: c = self.d_client.containers.get(container) c.start() s_containers.append(container) manifest.set_option(section, 'running', 'yes') self.logger.info('started ' + str(container) + ' with ID: ' + str(c.short_id)) except Exception as err: try: gpu = 'gpu.enabled' failed = False if (gpu in tool_d[container]['labels'] and tool_d[container]['labels'][gpu] == 'yes'): vent_config = Template(template=self.path_dirs.cfg_file) port = '' host = '' result = vent_config.option('nvidia-docker-plugin', 'port') if result[0]: port = result[1] else: port = '3476' result = vent_config.option('nvidia-docker-plugin', 'host') if result[0]: host = result[1] else: # now just requires ip, ifconfig try: route = check_output( ('ip', 'route')).decode('utf-8').split('\n') default = '' # grab the default network device. for device in route: if 'default' in device: default = device.split()[4] break # grab the IP address for the default device ip_addr = check_output( ('ifconfig', default)).decode('utf-8') ip_addr = ip_addr.split('\n')[1].split()[1] host = ip_addr except Exception as e: # pragma no cover self.logger.error('failed to grab ip. Ensure that \ ip and ifconfig are installed') nd_url = 'http://' + host + ':' + port + '/v1.0/docker/cli' params = {'vol': 'nvidia_driver'} r = requests.get(nd_url, params=params) if r.status_code == 200: options = r.text.split() for option in options: if option.startswith('--volume-driver='): tool_d[container][ 'volume_driver'] = option.split('=', 1)[1] elif option.startswith('--volume='): vol = option.split('=', 1)[1].split(':') if 'volumes' in tool_d[container]: if isinstance(tool_d[container]['volumes'], list): if len(vol) == 2: c_vol = vol[0] + \ ':' + vol[1] + ':rw' else: c_vol = vol[0] + ':' + \ vol[1] + ':' + vol[2] tool_d[container]['volumes'].append( c_vol) else: # Dictionary tool_d[container]['volumes'][ vol[0]] = { 'bind': vol[1], 'mode': vol[2] } else: tool_d[container]['volumes'] = { vol[0]: { 'bind': vol[1], 'mode': vol[2] } } elif option.startswith('--device='): dev = option.split('=', 1)[1] if 'devices' in tool_d[container]: tool_d[container]['devices'].append(dev + ':' + dev + ':rwm') else: tool_d[container]['devices'] = [ dev + ':' + dev + ':rwm' ] else: self.logger.error('Unable to parse ' + 'nvidia-docker option: ' + str(option)) else: failed = True f_containers.append(container) manifest.set_option(section, 'running', 'failed') self.logger.error('failed to start ' + str(container) + ' because nvidia-docker-plugin ' + 'failed with: ' + str(r.status_code)) if not failed: try: self.d_client.containers.remove(container, force=True) self.logger.info('removed old existing container: ' + str(container)) except Exception as e: pass cont_id = self.d_client.containers.run(detach=True, **tool_d[container]) s_containers.append(container) manifest.set_option(section, 'running', 'yes') self.logger.info('started ' + str(container) + ' with ID: ' + str(cont_id)) except Exception as e: # pragma: no cover f_containers.append(container) manifest.set_option(section, 'running', 'failed') self.logger.error('failed to start ' + str(container) + ' because: ' + str(e)) # save changes made to manifest manifest.write_config() return s_containers, f_containers
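

# Simplified, self-contained sketch of the parsing start_containers() does on
# the nvidia-docker-plugin /v1.0/docker/cli response when a container has
# gpu.enabled set. The sample call in the comment uses made-up flag values to
# show the "docker CLI flags" form the code expects; it is not captured output.
def _example_parse_nvidia_cli(cli_text):
    extra = {'devices': [], 'volumes': {}, 'volume_driver': None}
    for option in cli_text.split():
        if option.startswith('--volume-driver='):
            extra['volume_driver'] = option.split('=', 1)[1]
        elif option.startswith('--volume='):
            vol = option.split('=', 1)[1].split(':')
            extra['volumes'][vol[0]] = {'bind': vol[1], 'mode': vol[2]}
        elif option.startswith('--device='):
            dev = option.split('=', 1)[1]
            extra['devices'].append(dev + ':' + dev + ':rwm')
    return extra
# hypothetical call:
# _example_parse_nvidia_cli('--volume-driver=nvidia-docker '
#                           '--volume=nvidia_driver:/usr/local/nvidia:ro '
#                           '--device=/dev/nvidia0')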
def create(self, group_view=False): """ Update with current tools """ self.add_handlers({"^T": self.quit, "^Q": self.quit}) self.add(npyscreen.TitleText, name='Select which tools to ' + self.action['action'] + ':', editable=False) togglable = ['remove', 'enable', 'disable', 'build'] if self.action['action_name'] in togglable: self.cur_view = self.add(npyscreen.TitleText, name='Group view:', value='all groups', editable=False, rely=3) self.add_handlers({"^V": self.toggle_view}) i = 5 else: i = 4 if self.action['action_name'] == 'start': response = self.action['api_action'].inventory(choices=[ 'repos', 'tools', 'built', 'enabled', 'running', 'core' ]) else: response = self.action['api_action'].inventory( choices=['core', 'repos', 'tools']) if response[0]: inventory = response[1] repos = inventory['repos'] # dict has repo as key and list of core/non-core tools as values has_core = {} has_non_core = {} # find all tools that are in this repo # and list them if they are core for repo in repos: core_list = [] ncore_list = [] # splice the repo names for processing if (repo.startswith("http")): repo_name = repo.rsplit("/", 2)[1:] else: repo_name = repo.split("/") # determine if enabled or disabled tools should be shown show_disabled = False if 'action_name' in self.action: if self.action['action_name'] == 'enable': show_disabled = True for tool in inventory['tools']: tool_repo_name = tool.split(":") # cross reference repo names if (repo_name[0] == tool_repo_name[0] and repo_name[1] == tool_repo_name[1]): # check to ensure tool not set to locally active = no # in vent.cfg externally_active = False vent_cfg_file = self.action['api_action'].vent_config vent_cfg = Template(vent_cfg_file) tool_pairs = vent_cfg.section('external-services')[1] for ext_tool in tool_pairs: if ext_tool[0].lower() == inventory['tools'][tool]: try: ext_tool_options = json.loads(ext_tool[1]) loc = 'locally_active' if (loc in ext_tool_options and ext_tool_options[loc] == 'no'): externally_active = True except Exception as e: self.logger.error("Couldn't check ext" " because: " + str(e)) externally_active = False # check to ensure not disabled disabled = False manifest = Template(self.api_action.plugin.manifest) if manifest.option(tool, 'enabled')[1] == 'no': disabled = True if (not externally_active and not disabled and not show_disabled): instance_num = re.search( r'\d+$', manifest.option(tool, 'name')[1]) if not instance_num: ncore_list.append(tool) # multiple instances share same image elif self.action[ 'action_name'] not in self.no_instance: ncore_list.append(tool) elif (not externally_active and disabled and show_disabled): instance_num = re.search( r'\d+$', manifest.option(tool, 'name')[1]) if not instance_num: ncore_list.append(tool) # multiple instances share same image elif self.action[ 'action_name'] not in self.no_instance: ncore_list.append(tool) for tool in inventory['core']: tool_repo_name = tool.split(":") # cross reference repo names if (repo_name[0] == tool_repo_name[0] and repo_name[1] == tool_repo_name[1]): # check to ensure tool not set to locally active = no # in vent.cfg externally_active = False vent_cfg_file = self.action['api_action'].vent_config vent_cfg = Template(vent_cfg_file) tool_pairs = vent_cfg.section('external-services')[1] for ext_tool in tool_pairs: if ext_tool[0].lower() == inventory['core'][tool]: try: ext_tool_options = json.loads(ext_tool[1]) loc = 'locally_active' if (loc in ext_tool_options and ext_tool_options[loc] == 'no'): externally_active = True except Exception as e: 
self.logger.error("Couldn't check ext" " because: " + str(e)) externally_active = False # check to ensure not disabled disabled = False manifest = Template(self.api_action.plugin.manifest) if manifest.option(tool, 'enabled')[1] == 'no': disabled = True if (not externally_active and not disabled and not show_disabled): instance_num = re.search( r'\d+$', manifest.option(tool, 'name')[1]) if not instance_num: core_list.append(tool) # multiple instances share same image elif self.action[ 'action_name'] not in self.no_instance: core_list.append(tool) elif (not externally_active and disabled and show_disabled): instance_num = re.search( r'\d+$', manifest.option(tool, 'name')[1]) if not instance_num: core_list.append(tool) # multiple instances share same image elif self.action[ 'action_name'] not in self.no_instance: core_list.append(tool) has_core[repo] = core_list has_non_core[repo] = ncore_list for repo in repos: self.tools_tc[repo] = {} if self.action['cores']: # make sure only repos with core tools are displayed if has_core.get(repo): self.repo_widgets[repo] = self.add(npyscreen.TitleText, name='Plugin: ' + repo, editable=False, rely=i, relx=5) for tool in has_core[repo]: tool_name = tool.split(":", 2)[2].split("/")[-1] if tool_name == "": tool_name = "/" self.tools_tc[repo][tool] = self.add( npyscreen.CheckBox, name=tool_name, value=True, relx=10) i += 1 i += 3 else: # make sure only repos with non-core tools are displayed if has_non_core.get(repo): self.repo_widgets[repo] = self.add(npyscreen.TitleText, name='Plugin: ' + repo, editable=False, rely=i, relx=5) for tool in has_non_core[repo]: tool_name = tool.split(":", 2)[2].split("/")[-1] if tool_name == "": tool_name = "/" self.tools_tc[repo][tool] = self.add( npyscreen.CheckBox, name=tool_name, value=True, relx=10) i += 1 i += 3 return
def cores(self, action, branch='master', version='HEAD'): """ Supply action (install, build, start, stop, clean) for core tools """ self.logger.info('Starting: cores') status = (False, None) try: self.logger.info('action provided: ' + str(action)) core = self.tools_status(True, branch=branch, version=version)[1] if action in ['install', 'build']: tools = [] core_repo = 'https://github.com/cyberreboot/vent' resp = self.p_helper.apply_path(core_repo) if resp[0]: cwd = resp[1] else: self.logger.info('apply_path failed. Exiting cores' ' with status ' + str(resp)) return resp path = os.path.join(self.plugin.path_dirs.plugins_dir, 'cyberreboot/vent') # TODO commenting out for now, should use update_repo # response = self.p_helper.checkout(branch=branch, # version=version) response = (True, None) self.logger.info('status of plugin checkout ' + str(response)) matches = self.p_helper.available_tools(path, version=version, groups='core') for match in matches: name = match[0].rsplit('/')[-1] constraints = {'name': name, 'repo': core_repo} prev_installed, _ = self.p_helper. \ constraint_options(constraints, []) if not prev_installed: tools.append((match[0], '')) # only add stuff not already installed or repo specification if ((tools) or (isinstance(matches, list) and len(matches) == 0)): status = self.plugin.add(core_repo, tools=tools, branch=branch, build=False, core=True) self.logger.info('status of plugin add: ' + str(status)) else: self.logger.info('no new tools to install') status = (True, 'previously installed') plugin_c = Template(template=self.plugin.manifest) sections = plugin_c.sections() for tool in core['normal']: for section in sections[1]: name = plugin_c.option(section, 'name') orig_branch = plugin_c.option(section, 'branch') namespace = plugin_c.option(section, 'namespace') version = plugin_c.option(section, 'version') if (name[1] == tool and orig_branch[1] == branch and namespace[1] == 'cyberreboot/vent' and version[1] == 'HEAD'): plugin_c.set_option(section, 'image_name', 'cyberreboot/vent-' + tool.replace('_', '-') + ':' + branch) plugin_c.write_config() chdir(cwd) if action == 'build': plugin_c = Template(template=self.plugin.manifest) sections = plugin_c.sections() try: for tool in core['normal']: for section in sections[1]: tool = tool.replace('_', '-') image_name = plugin_c.option(section, 'image_name') check_image = 'cyberreboot/vent-' check_image += tool + ':' + branch if image_name[1] == check_image: timestamp = str(datetime.utcnow()) + ' UTC' try: # currently can't use docker-py because it # returns a 404 on pull so no way to valid # if it worked or didn't image_id = None cmd = 'docker pull ' + check_image output = check_output(shlex.split(cmd), stderr=STDOUT).decode('utf-8') # image_name in format of (bool, image_name) name = image_name[1] d_client = docker.from_env() image_attrs = d_client.images.get(name) image_attrs = image_attrs.attrs image_id = image_attrs['Id'].split(':')[ 1][:12] if image_id: plugin_c.set_option(section, 'built', 'yes') plugin_c.set_option(section, 'image_id', image_id) plugin_c.set_option(section, 'last_updated', timestamp) status = (True, 'Pulled ' + tool) self.logger.info(str(status)) else: plugin_c.set_option(section, 'built', 'failed') plugin_c.set_option(section, 'last_updated', timestamp) status = (False, 'Failed to pull image ' + str(output.split('\n')[-1])) self.logger.error(str(status)) except Exception as e: # pragma: no cover plugin_c.set_option(section, 'built', 'failed') plugin_c.set_option(section, 'last_updated', timestamp) status = 
(False, 'Failed to pull image ' + str(e)) self.logger.error(str(status)) except Exception as e: # pragma: no cover status = (False, 'Failed to pull images ' + str(e)) self.logger.error(str(status)) plugin_c.write_config() elif action == 'start': status = self.api_action.prep_start(groups='core', branch=branch) if status[0]: tool_d = status[1] status = self.api_action.start(tool_d) elif action == 'stop': status = self.api_action.stop(groups='core', branch=branch) elif action == 'clean': status = self.api_action.clean(groups='core', branch=branch) except Exception as e: # pragma: no cover self.logger.info('core failed with error: ' + str(e)) status = (False, e) self.logger.info('Status of core: ' + str(status[0])) self.logger.info('Finished: core') return status
def Services(core, vent=True, external=False, **kargs): """ Get services that have exposed ports, expects param core to be True or False based on which type of services to return, by default limit to vent containers and processes not running externally, if not limited by vent containers, then core is ignored. """ services = [] path_dirs = PathDirs(**kargs) template = Template(template=path_dirs.cfg_file) services_uri = template.option("main", "services_uri") try: # look for internal services if not external: d_client = docker.from_env() if vent: c_filter = {'label': 'vent'} containers = d_client.containers.list(filters=c_filter) else: containers = d_client.containers.list() for c in containers: uri_prefix = '' uri_postfix = '' uri_user = '' uri_pw = '' name = None if vent and 'vent.name' in c.attrs['Config']['Labels']: if ((core and 'vent.groups' in c.attrs['Config']['Labels'] and 'core' in c.attrs['Config']['Labels']['vent.groups']) or (not core and 'vent.groups' in c.attrs['Config']['Labels'] and 'core' not in c.attrs['Config']['Labels']['vent.groups'])): name = c.attrs['Config']['Labels']['vent.name'] if 'uri_prefix' in c.attrs['Config']['Labels']: uri_prefix = c.attrs['Config']['Labels'][ 'uri_prefix'] if 'uri_postfix' in c.attrs['Config']['Labels']: uri_postfix = c.attrs['Config']['Labels'][ 'uri_postfix'] if 'uri_user' in c.attrs['Config']['Labels']: uri_user = "******" uri_user += c.attrs['Config']['Labels']['uri_user'] if 'uri_pw' in c.attrs['Config']['Labels']: uri_pw = " pw:" uri_pw += c.attrs['Config']['Labels']['uri_pw'] else: name = c.name ports = c.attrs['NetworkSettings']['Ports'] p = [] for port in ports: if ports[port]: uri_creds = '' if uri_user or uri_pw: uri_creds = " - (" + uri_user + uri_pw + " )" host = ports[port][0]['HostIp'] if services_uri[0] and host == '0.0.0.0': host = services_uri[1] p.append(uri_prefix + host + ":" + ports[port][0]['HostPort'] + uri_postfix + uri_creds) if p and name: services.append((name, p)) # look for external services else: ext_tools = template.section('external-services')[1] for ext_tool in ext_tools: try: name = ext_tool[0].lower() p = [] settings_dict = json.loads(ext_tool[1]) if ('locally_active' in settings_dict and settings_dict['locally_active'] == 'no'): # default protocol to display will be http protocol = 'http' ip_address = '' port = '' for setting in settings_dict: if setting == 'ip_address': ip_address = settings_dict[setting] if setting == 'port': port = settings_dict[setting] if setting == 'protocol': protocol = settings_dict[setting] p.append(protocol + '://' + ip_address + ':' + port) if p and name: services.append((name, p)) except Exception: # pragma: no cover p = None except Exception as e: # pragma: no cover pass return services
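

# Illustrative usage sketch: Services() returns a list of (name, [uris])
# tuples, so a caller listing core service endpoints might look like this.
# Keyword arguments are passed straight through to PathDirs(**kargs) above.
def _example_services():
    for name, uris in Services(True):
        print(name)
        for uri in uris:
            print('  ' + uri)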
def auto_install(self): """ Automatically detects images and installs them in the manifest if they are not there already """ template = Template(template=self.manifest) sections = template.sections() images = self.d_client.images.list(filters={'label': 'vent'}) add_sections = [] status = (True, None) for image in images: if ('Labels' in image.attrs and 'vent.section' in image.attrs['Config']['Labels'] and not image.attrs['Config']['Labels']['vent.section'] in sections[1]): section = image.attrs['Config']['Labels']['vent.section'] section_str = image.attrs['Config']['Labels'][ 'vent.section'].split(":") template.add_section(section) if 'vent.name' in image.attrs['Config']['Labels']: template.set_option( section, 'name', image.attrs['Config']['Labels']['vent.name']) if 'vent.repo' in image.attrs['Config']['Labels']: template.set_option( section, 'repo', image.attrs['Config']['Labels']['vent.repo']) git_path = join(self.path_dirs.plugins_dir, "/".join(section_str[:2])) if not isdir(git_path): # clone it down status = self.p_helper.clone( image.attrs['Config']['Labels']['vent.repo']) template.set_option(section, 'path', join(git_path, section_str[-3][1:])) # get template settings # TODO account for template files not named vent.template v_template = Template(template=join( git_path, section_str[-3][1:], 'vent.template')) tool_sections = v_template.sections() if tool_sections[0]: for s in tool_sections[1]: section_dict = {} options = v_template.options(s) if options[0]: for option in options[1]: option_name = option if option == 'name': # get link name template.set_option( section, "link_name", v_template.option(s, option)[1]) option_name = 'link_name' opt_val = v_template.option(s, option)[1] section_dict[option_name] = opt_val if section_dict: template.set_option(section, s, json.dumps(section_dict)) if ('vent.type' in image.attrs['Config']['Labels'] and image.attrs['Config']['Labels']['vent.type'] == 'repository'): template.set_option(section, 'namespace', "/".join(section_str[:2])) template.set_option(section, 'enabled', 'yes') template.set_option(section, 'branch', section_str[-2]) template.set_option(section, 'version', section_str[-1]) template.set_option(section, 'last_updated', str(datetime.utcnow()) + " UTC") template.set_option(section, 'image_name', image.attrs['RepoTags'][0]) template.set_option(section, 'type', 'repository') if 'vent.groups' in image.attrs['Config']['Labels']: template.set_option( section, 'groups', image.attrs['Config']['Labels']['vent.groups']) template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image.attrs['Id'].split(":")[1][:12]) template.set_option(section, 'running', 'no') # check if image is running as a container containers = self.d_client.containers.list( filters={'label': 'vent'}) for container in containers: if container.attrs['Image'] == image.attrs['Id']: template.set_option(section, 'running', 'yes') add_sections.append(section) template.write_config() if status[0]: status = (True, add_sections) return status
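auto_install leans on the vent.section label following the org:name:tool_path:branch:version layout that _build_manifest writes; a hedged sketch of that parsing step (the example label value is made up):

def parse_section(section):
    """Split a manifest/label section name into its components."""
    parts = section.split(':')
    return {'namespace': '/'.join(parts[:2]),  # org/name
            'tool_path': parts[-3],            # '' or '/relative/path/to/tool'
            'branch': parts[-2],
            'version': parts[-1]}

# example: parse_section('cyberreboot:vent:/vent/core/file_drop:master:HEAD')
# -> {'namespace': 'cyberreboot/vent', 'tool_path': '/vent/core/file_drop',
#     'branch': 'master', 'version': 'HEAD'}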
def _build_image(self, template, match_path, image_name, section, build_local=False): """ Build docker images and store results in template """ self.logger.info('Starting: _build_image') status = True def set_instances(template, section, built, image_id=None): """ Set build information for multiple instances """ self.logger.info('entering set_instances') i = 2 while True: addtl_section = section.rsplit(':', 2) addtl_section[0] += str(i) addtl_section = ':'.join(addtl_section) self.logger.info(addtl_section) if template.section(addtl_section)[0]: template.set_option(addtl_section, 'built', built) if image_id: template.set_option(addtl_section, 'image_id', image_id) template.set_option(addtl_section, 'last_updated', Timestamp()) else: break i += 1 # determine whether a tool should be considered a multi instance try: settings_dict = json.loads(template.option(section, 'settings')[1]) if int(settings_dict['instances']) > 1: multi_instance = True else: multi_instance = False except Exception: multi_instance = False status = False # !! TODO return status of whether it built successfully or not if self.build: cwd = getcwd() chdir(match_path) try: name = template.option(section, 'name') groups = template.option(section, 'groups') repo = template.option(section, 'repo') t_type = template.option(section, 'type') path = template.option(section, 'path') must_build = self.fill_config(path[1]) if groups[1] == '' or not groups[0]: groups = (True, 'none') if not name[0]: name = (True, image_name) pull = False image_exists = False output = '' cfg_template = Template(template=self.path_dirs.cfg_file) use_existing_image = False result = cfg_template.option('build-options', 'use_existing_images') if result[0]: use_existing_image = result[1] if use_existing_image == 'yes' and not must_build: try: self.d_client.images.get(image_name) i_attrs = self.d_client.images.get(image_name).attrs image_id = i_attrs['Id'].split(':')[1][:12] template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'yes', image_id) status = (True, 'Found ' + image_name) self.logger.info(str(status)) image_exists = True except docker.errors.ImageNotFound: image_exists = False except Exception as e: # pragma: no cover self.logger.warning('Failed to query Docker for images' ' because: ' + str(e)) if not image_exists: # pull if '/' in image_name, fallback to build if '/' in image_name and not build_local and not must_build: try: # currently can't use docker-py because it doesn't support # labels on images yet self.logger.info('Trying to pull ' + image_name) output = check_output( shlex.split('docker pull ' + image_name), stderr=STDOUT, close_fds=True).decode('utf-8') self.logger.info('Pulling ' + name[1] + '\n' + output) i_attrs = self.d_client.images.get( image_name).attrs image_id = i_attrs['Id'].split(':')[1][:12] if image_id: template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option( section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'yes', image_id) status = (True, 'Pulled ' + image_name) self.logger.info(str(status)) else: template.set_option(section, 'built', 'failed') template.set_option( section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance:
set_instances(template, section, 'failed') status = (False, 'Failed to pull image ' + str(output.split('\n')[-1])) self.logger.warning(str(status)) pull = True except Exception as e: # pragma: no cover self.logger.warning('Failed to pull image, going' ' to build instead: ' + str(e)) status = False if not pull and not image_exists: # get username to label built image with username = getpass.getuser() # see if additional tags needed for images tagged at HEAD commit_tag = '' if image_name.endswith('HEAD'): commit_id = template.option(section, 'commit_id') if commit_id[0]: commit_tag = (' -t ' + image_name[:-4] + str(commit_id[1])) # see if additional file arg needed for building multiple # images from same directory file_tag = ' .' multi_tool = template.option(section, 'multi_tool') if multi_tool[0] and multi_tool[1] == 'yes': specific_file = template.option(section, 'name')[1] if specific_file == 'unspecified': file_tag = ' -f Dockerfile .' else: file_tag = ' -f Dockerfile.' + specific_file + ' .' # update image name with new version for update image_name = image_name.rsplit(':', 1)[0] + ':' + self.branch output = check_output( shlex.split('docker build --label' ' vent --label' ' vent.section=' + section + ' --label' ' vent.repo=' + repo[1] + ' --label' ' vent.type=' + t_type[1] + ' --label' ' vent.name=' + name[1] + ' --label' ' vent.groups=' + groups[1] + ' --label' + ' built-by=' + username + ' -t ' + image_name + commit_tag + file_tag), stderr=STDOUT, close_fds=True).decode('utf-8') self.logger.info('Building ' + name[1] + '\n' + output) image_id = '' for line in output.split('\n'): suc_str = 'Successfully built ' if line.startswith(suc_str): image_id = line.split(suc_str)[1].strip() template.set_option(section, 'built', 'yes') template.set_option(section, 'image_id', image_id) template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') # set other instances too if multi_instance: set_instances(template, section, 'yes', image_id) except Exception as e: # pragma: no cover self.logger.info('current working directory: ' + str(os.getcwd())) self.logger.error('unable to build image: ' + str(image_name) + ' because: ' + str(e) + ' and ' + str(output)) template.set_option(section, 'built', 'failed') template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') if multi_instance: set_instances(template, section, 'failed') status = False chdir(cwd) else: template.set_option(section, 'built', 'no') template.set_option(section, 'last_updated', str(datetime.utcnow()) + ' UTC') if multi_instance: set_instances(template, section, 'no') template.set_option(section, 'running', 'no') self.logger.info('Status of _build_image: ' + str(status)) self.logger.info('Finished: _build_image:') return template
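When _build_image falls back to a local build, it recovers the image ID by scanning the classic `docker build` output for the 'Successfully built ' line; a minimal sketch of just that parsing step (the sample output string is illustrative):

def image_id_from_build_output(output):
    """Return the short image ID from legacy `docker build` output, or None."""
    for line in output.split('\n'):
        if line.startswith('Successfully built '):
            return line.split('Successfully built ')[1].strip()
    return None

# example: image_id_from_build_output('Step 5/5 : CMD ...\nSuccessfully built 1a2b3c4d5e6f')
# -> '1a2b3c4d5e6f'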
def tools_status(self, core, branch="master", version="HEAD", **kargs): """ Get tools that are currently installed/built/running and also the number of repos that those tools come from; can toggle whether looking for core tools or plugin tools """ # !! TODO this might need to store namespaces/branches/versions all_tools = {'built': [], 'running': [], 'installed': [], 'normal': []} core_repo = 'https://github.com/cyberreboot/vent' repos = set() tools = Tools(**kargs) # get manifest file manifest = os.path.join(self.api_action.plugin.path_dirs.meta_dir, "plugin_manifest.cfg") template = Template(template=manifest) tools = template.sections() # get repos if core: p_helper = PluginHelper(plugins_dir='.internals/plugins/') repos.add(core_repo) else: p_helper = PluginHelper(plugins_dir='plugins/') for tool in tools[1]: repo = template.option(tool, 'repo') if repo[0] and repo[1] != core_repo: repos.add(repo[1]) # get normal tools for repo in repos: status, _ = p_helper.clone(repo) if status: p_helper.apply_path(repo) p_helper.checkout(branch=branch, version=version) path, _, _ = p_helper.get_path(repo, core=core) matches = None if core: matches = p_helper.available_tools(path, version=version, groups='core') else: matches = p_helper.available_tools(path, version=version) for match in matches: if core: all_tools['normal'].append(match[0].split('/')[-1].replace('_', '-')) else: all_tools['normal'].append(match[0].split('/')[-1]) # get tools that have been installed for tool in tools[1]: repo = template.option(tool, "repo") if repo[0] and repo[1] in repos: name = template.option(tool, "name") if name[0]: all_tools['installed'].append(name[1].replace('_', '-')) # get tools that have been built and/or are running try: d_client = docker.from_env() images = d_client.images.list(filters={'label': 'vent'}) for image in images: try: core_check = ("vent.groups" in image.attrs['Config']['Labels'] and 'core' in image.attrs['Config']['Labels']['vent.groups']) image_check = None if core: image_check = core_check else: image_check = not core_check if image_check: if ('vent.name' in image.attrs['Config']['Labels'] and 'hidden' not in image.attrs['Config']['Labels']['vent.groups']): if core: all_tools['built'].append(image.attrs['Config']['Labels']['vent.name'].replace('_', '-')) else: all_tools['built'].append(image.attrs['Config']['Labels']['vent.name']) except Exception as err: # pragma: no cover self.logger.error("image_check went wrong " + str(err)) containers = d_client.containers.list(filters={'label': 'vent'}) for container in containers: try: core_check = ("vent.groups" in container.attrs['Config']['Labels'] and 'core' in container.attrs['Config']['Labels']['vent.groups']) container_check = None if core: container_check = core_check else: container_check = not core_check if container_check: if ('vent.name' in container.attrs['Config']['Labels'] and 'hidden' not in container.attrs['Config']['Labels']['vent.groups']): if core: all_tools['running'].append(container.attrs['Config']['Labels']['vent.name'].replace('_', '-')) else: all_tools['running'].append(container.attrs['Config']['Labels']['vent.name']) except Exception as err: # pragma: no cover self.logger.error("container_check went wrong " + str(err)) except Exception as e: # pragma: no cover self.logger.error("Something with docker went wrong " + str(e)) return (len(repos), all_tools)
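tools_status returns a (repo_count, tool_dict) pair whose lists can be compared to see what still needs building or starting; a hedged consumer sketch (the helper name and sample values are ours):

def summarize_tools_status(result):
    """Reduce tools_status output to what is missing at each stage."""
    repo_count, tools = result
    return {'repos': repo_count,
            'not_built': sorted(set(tools['installed']) - set(tools['built'])),
            'not_running': sorted(set(tools['built']) - set(tools['running']))}

# illustrative input shape:
# summarize_tools_status((1, {'normal': ['file-drop'], 'installed': ['file-drop'],
#                             'built': ['file-drop'], 'running': []}))
# -> {'repos': 1, 'not_built': [], 'not_running': ['file-drop']}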
def cores(self, action, branch="master"): """ Supply action (install, build, start, stop, clean) for core tools """ self.logger.info("Starting: cores") status = (True, None) try: self.logger.info("action provided: "+str(action)) core = Core(branch=branch) if action in ["install", "build"]: tools = [] plugins = Plugin(plugins_dir=".internals/plugins") plugins.version = 'HEAD' plugins.branch = branch plugins.apply_path('https://github.com/cyberreboot/vent') response = plugins.checkout() self.logger.info("status of plugin checkout "+str(response)) matches = plugins._available_tools(groups='core') for match in matches: tools.append((match[0], '')) status = plugins.add('https://github.com/cyberreboot/vent', tools=tools, branch=branch, build=False) self.logger.info("status of plugin add: "+str(status)) plugin_config = Template(template=self.plugin.manifest) sections = plugin_config.sections() for tool in core['normal']: for section in sections[1]: name = plugin_config.option(section, "name") orig_branch = plugin_config.option(section, "branch") namespace = plugin_config.option(section, "namespace") version = plugin_config.option(section, "version") if name[1] == tool and orig_branch[1] == branch and namespace[1] == "cyberreboot/vent" and version[1] == "HEAD": plugin_config.set_option(section, "image_name", "cyberreboot/vent-"+tool+":"+branch) plugin_config.write_config() if action == "build": plugin_config = Template(template=self.plugin.manifest) sections = plugin_config.sections() try: for tool in core['normal']: for section in sections[1]: image_name = plugin_config.option(section, "image_name") if image_name[1] == "cyberreboot/vent-"+tool+":"+branch: try: # currently can't use docker-py because it # returns a 404 on pull so no way to valid if it # worked or didn't #image_id = self.d_client.images.pull('cyberreboot/vent-'+tool, tag=branch) image_id = None output = subprocess.check_output(shlex.split("docker pull cyberreboot/vent-"+tool+":"+branch), stderr=subprocess.STDOUT) for line in output.split('\n'): if line.startswith("Digest: sha256:"): image_id = line.split("Digest: sha256:")[1][:12] if image_id: plugin_config.set_option(section, "built", "yes") plugin_config.set_option(section, "image_id", image_id) plugin_config.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (True, "Pulled "+tool) self.logger.info(str(status)) else: plugin_config.set_option(section, "built", "failed") plugin_config.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image "+str(output.split('\n')[-1])) self.logger.error(str(status)) except Exception as e: # pragma: no cover plugin_config.set_option(section, "built", "failed") plugin_config.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image "+str(e)) self.logger.error(str(status)) except Exception as e: # pragma: no cover status = (False, "Failed to pull images "+str(e)) self.logger.error(str(status)) plugin_config.write_config() elif action == "start": status = self.prep_start(groups="core", branch=branch) if status[0]: tool_dict = status[1] status = self.start(tool_dict) elif action == "stop": status = self.stop(groups="core", branch=branch) elif action == "clean": status = self.clean(groups="core", branch=branch) except Exception as e: self.logger.info("core failed with error: "+str(e)) status = (False, e) self.logger.info("Status of core: "+str(status)) self.logger.info("Finished: core") return status
def cores(self, action, branch="master"): """ Supply action (install, build, start, stop, clean) for core tools """ self.logger.info("Starting: cores") status = (True, None) try: self.logger.info("action provided: " + str(action)) core = Core(branch=branch) if action in ["install", "build"]: tools = [] plugins = Plugin(plugins_dir=".internals/plugins") plugins.version = 'HEAD' plugins.branch = branch plugins.apply_path('https://github.com/cyberreboot/vent') response = plugins.checkout() self.logger.info("status of plugin checkout " + str(response)) matches = plugins._available_tools(groups='core') for match in matches: tools.append((match[0], '')) status = plugins.add('https://github.com/cyberreboot/vent', tools=tools, branch=branch, build=False) self.logger.info("status of plugin add: " + str(status)) plugin_config = Template(template=self.plugin.manifest) sections = plugin_config.sections() for tool in core['normal']: for section in sections[1]: name = plugin_config.option(section, "name") orig_branch = plugin_config.option(section, "branch") namespace = plugin_config.option(section, "namespace") version = plugin_config.option(section, "version") if name[1] == tool and orig_branch[ 1] == branch and namespace[ 1] == "cyberreboot/vent" and version[ 1] == "HEAD": plugin_config.set_option( section, "image_name", "cyberreboot/vent-" + tool + ":" + branch) plugin_config.write_config() if action == "build": plugin_config = Template(template=self.plugin.manifest) sections = plugin_config.sections() try: for tool in core['normal']: for section in sections[1]: image_name = plugin_config.option( section, "image_name") if image_name[ 1] == "cyberreboot/vent-" + tool + ":" + branch: try: # currently can't use docker-py because it # returns a 404 on pull so no way to valid if it # worked or didn't #image_id = self.d_client.images.pull('cyberreboot/vent-'+tool, tag=branch) image_id = None output = subprocess.check_output( shlex.split( "docker pull cyberreboot/vent-" + tool + ":" + branch), stderr=subprocess.STDOUT) for line in output.split('\n'): if line.startswith("Digest: sha256:"): image_id = line.split( "Digest: sha256:")[1][:12] if image_id: plugin_config.set_option( section, "built", "yes") plugin_config.set_option( section, "image_id", image_id) plugin_config.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (True, "Pulled " + tool) self.logger.info(str(status)) else: plugin_config.set_option( section, "built", "failed") plugin_config.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image " + str(output.split('\n')[-1])) self.logger.error(str(status)) except Exception as e: # pragma: no cover plugin_config.set_option( section, "built", "failed") plugin_config.set_option( section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") status = (False, "Failed to pull image " + str(e)) self.logger.error(str(status)) except Exception as e: # pragma: no cover status = (False, "Failed to pull images " + str(e)) self.logger.error(str(status)) plugin_config.write_config() elif action == "start": status = self.prep_start(groups="core", branch=branch) if status[0]: tool_dict = status[1] status = self.start(tool_dict) elif action == "stop": status = self.stop(groups="core", branch=branch) elif action == "clean": status = self.clean(groups="core", branch=branch) except Exception as e: self.logger.info("core failed with error: " + str(e)) status = (False, e) self.logger.info("Status of core: " + 
str(status)) self.logger.info("Finished: core") return status
def cores(self, action, branch="master", version='HEAD'): """ Supply action (install, build, start, stop, clean) for core tools """ self.logger.info("Starting: cores") status = (False, None) try: self.logger.info("action provided: " + str(action)) core = self.tools_status(True, branch=branch, version=version)[1] if action in ["install", "build"]: tools = [] core_repo = 'https://github.com/cyberreboot/vent' resp = self.p_helper.apply_path(core_repo) if resp[0]: cwd = resp[1] else: self.logger.info("apply_path failed. Exiting cores" " with status " + str(resp)) return resp path = os.path.join(self.plugin.path_dirs.plugins_dir, 'cyberreboot/vent') response = self.p_helper.checkout(branch=branch, version=version) self.logger.info("status of plugin checkout " + str(response)) matches = self.p_helper.available_tools(path, version=version, groups='core') for match in matches: name = match[0].rsplit('/')[-1] constraints = {'name': name, 'repo': core_repo} prev_installed, _ = self.p_helper. \ constraint_options(constraints, []) if not prev_installed: tools.append((match[0], '')) # only add stuff not already installed or repo specification if ((tools) or (isinstance(matches, list) and len(matches) == 0)): status = self.plugin.add(core_repo, tools=tools, branch=branch, build=False, core=True) self.logger.info("status of plugin add: " + str(status)) else: self.logger.info("no new tools to install") status = (True, "previously installed") plugin_c = Template(template=self.plugin.manifest) sections = plugin_c.sections() for tool in core['normal']: for section in sections[1]: name = plugin_c.option(section, "name") orig_branch = plugin_c.option(section, "branch") namespace = plugin_c.option(section, "namespace") version = plugin_c.option(section, "version") if (name[1] == tool and orig_branch[1] == branch and namespace[1] == "cyberreboot/vent" and version[1] == "HEAD"): plugin_c.set_option(section, "image_name", "cyberreboot/vent-" + tool.replace('_', '-') + ":" + branch) plugin_c.write_config() chdir(cwd) if action == "build": plugin_c = Template(template=self.plugin.manifest) sections = plugin_c.sections() try: for tool in core['normal']: for section in sections[1]: tool = tool.replace('_', '-') image_name = plugin_c.option(section, "image_name") check_image = "cyberreboot/vent-" check_image += tool + ":" + branch if image_name[1] == check_image: timestamp = str(datetime.utcnow()) + " UTC" try: # currently can't use docker-py because it # returns a 404 on pull so no way to valid # if it worked or didn't image_id = None cmd = "docker pull " + check_image output = check_output(shlex.split(cmd), stderr=STDOUT) # image_name in format of (bool, image_name) name = image_name[1] d_client = docker.from_env() image_attrs = d_client.images.get(name) image_attrs = image_attrs.attrs image_id = image_attrs['Id'].split(':')[1][:12] if image_id: plugin_c.set_option(section, "built", "yes") plugin_c.set_option(section, "image_id", image_id) plugin_c.set_option(section, "last_updated", timestamp) status = (True, "Pulled " + tool) self.logger.info(str(status)) else: plugin_c.set_option(section, "built", "failed") plugin_c.set_option(section, "last_updated", timestamp) status = (False, "Failed to pull image " + str(output.split('\n')[-1])) self.logger.error(str(status)) except Exception as e: # pragma: no cover plugin_c.set_option(section, "built", "failed") plugin_c.set_option(section, "last_updated", timestamp) status = (False, "Failed to pull image " + str(e)) self.logger.error(str(status)) except Exception as e: # 
pragma: no cover status = (False, "Failed to pull images " + str(e)) self.logger.error(str(status)) plugin_c.write_config() elif action == "start": status = self.api_action.prep_start(groups="core", branch=branch) if status[0]: tool_d = status[1] status = self.api_action.start(tool_d) elif action == "stop": status = self.api_action.stop(groups="core", branch=branch) elif action == "clean": status = self.api_action.clean(groups="core", branch=branch) except Exception as e: # pragma: no cover self.logger.info("core failed with error: " + str(e)) status = (False, e) self.logger.info("Status of core: " + str(status[0])) self.logger.info("Finished: core") return status
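Nearly every Template call in these functions returns an (exists, value) tuple that is then indexed with [0] and [1]; a tiny hedged helper showing that convention (the helper name is ours and not part of vent):

def opt_or_default(template, section, option, default=None):
    """Unpack Template.option()'s (exists, value) tuple, falling back to a default."""
    exists, value = template.option(section, option)
    return value if exists else default

# e.g. branch = opt_or_default(plugin_c, section, 'branch', 'master')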
def _build_manifest(self, matches): """ Builds and writes the manifest for the tools being added """ # !! TODO check for pre-existing that conflict with request and disable and/or remove image for match in matches: template = Template(template=self.manifest) # !! TODO check for special settings here first for the specific match self.version = match[1] response = self.checkout() if response[0]: section = self.org + ":" + self.name + ":" + match[0] + ":" + self.branch + ":" + self.version match_path = self.path + match[0] image_name = self.org + "-" + self.name + "-" if match[0] != '': # if tool is in a subdir, add that to the name of the image image_name += '-'.join(match[0].split('/')[1:]) + "-" image_name += self.branch + ":" + self.version # check if the section already exists exists, options = template.section(section) previous_commit = None previous_commits = None head = False if exists: for option in options: # TODO check if tool name but a different version exists - then disable/remove if set if option[0] == 'version' and option[1] == 'HEAD': head = True if option[0] == 'built' and option[1] == 'yes': # !! TODO remove pre-existing image pass if option[0] == 'commit_id': previous_commit = option[1] if option[0] == 'previous_versions': previous_commits = option[1] # !! TODO # check if section should be removed from config - i.e. all tools, # but new commit removed one that was in a previous commit # set template section and options for tool at version and branch template.add_section(section) template.set_option(section, "name", match[0].split('/')[-1]) template.set_option(section, "namespace", self.org+'/'+self.name) template.set_option(section, "path", match_path) template.set_option(section, "repo", self.repo) template.set_option(section, "enabled", "yes") template.set_option(section, "branch", self.branch) template.set_option(section, "version", self.version) template.set_option(section, "last_updated", str(datetime.datetime.utcnow()) + " UTC") template.set_option(section, "image_name", image_name) vent_template = Template(template=os.path.join(match_path, 'vent.template')) vent_status, response = vent_template.option("info", "name") if vent_status: template.set_option(section, "link_name", response) else: template.set_option(section, "link_name", match[0].split('/')[-1]) commit_id = None if self.version == 'HEAD': os.chdir(match_path) commit_id = subprocess.check_output(shlex.split("git rev-parse --short HEAD"), stderr=subprocess.STDOUT, close_fds=True).strip() template.set_option(section, "commit_id", commit_id) if head: # no need to store previous commits if not HEAD, since # the version will always be the same commit ID if previous_commit and previous_commit != commit_id: if previous_commits and previous_commit not in previous_commits: previous_commits = previous_commit+','+previous_commits elif not previous_commits: previous_commits = previous_commit if previous_commits and previous_commits != commit_id: template.set_option(section, "previous_versions", previous_commits) if self.version_alias: template.set_option(section, "version_alias", self.version_alias) if self.groups: template.set_option(section, "groups", self.groups) else: vent_template = os.path.join(match_path, 'vent.template') if os.path.exists(vent_template): v_template = Template(template=vent_template) groups = v_template.option("info", "groups") if groups[0]: template.set_option(section, "groups", groups[1]) template = self._build_image(template, match_path, image_name, section) # write out configuration to the 
manifest file template.write_config() # reset to repo directory os.chdir(self.path) return
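_build_manifest derives the manifest section name and the image tag from the same pieces; a hedged sketch of that naming, with example inputs that are ours rather than taken from a real manifest:

def manifest_names(org, name, tool_path, branch, version):
    """Mirror the section and image_name construction used in _build_manifest."""
    section = org + ':' + name + ':' + tool_path + ':' + branch + ':' + version
    image_name = org + '-' + name + '-'
    if tool_path != '':
        # tools in subdirectories get the subdirectory folded into the image name
        image_name += '-'.join(tool_path.split('/')[1:]) + '-'
    image_name += branch + ':' + version
    return section, image_name

# example: manifest_names('cyberreboot', 'vent', '/vent/core/file_drop', 'master', 'HEAD')
# -> ('cyberreboot:vent:/vent/core/file_drop:master:HEAD',
#     'cyberreboot-vent-vent-core-file_drop-master:HEAD')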
def Services(core, vent=True, external=False, **kargs): """ Get services that have exposed ports, expects param core to be True or False based on which type of services to return, by default limit to vent containers and processes not running externally, if not limited by vent containers, then core is ignored. """ services = [] path_dirs = PathDirs(**kargs) template = Template(template=path_dirs.cfg_file) services_uri = template.option('main', 'services_uri') try: # look for internal services if not external: d_client = docker.from_env() if vent: c_filter = {'label': 'vent'} containers = d_client.containers.list(filters=c_filter) else: containers = d_client.containers.list() for c in containers: uris = {} name = None if vent and 'vent.name' in c.attrs['Config']['Labels']: if ((core and 'vent.groups' in c.attrs['Config']['Labels'] and 'core' in c.attrs['Config']['Labels']['vent.groups']) or (not core and 'vent.groups' in c.attrs['Config']['Labels'] and 'core' not in c.attrs['Config']['Labels']['vent.groups'])): name = c.attrs['Config']['Labels']['vent.name'] if name == '': name = c.attrs['Config']['Labels'][ 'vent.namespace'].split('/')[1] for label in c.attrs['Config']['Labels']: if label.startswith('uri'): try: val = int(label[-1]) if val not in uris: uris[val] = {} uris[val][label[:-1]] = c.attrs['Config'][ 'Labels'][label] except Exception as e: # pragma: no cover logger.error('Malformed services section' ' in the template file ' + str(e)) else: name = c.name if name and 'vent.repo' in c.attrs['Config']['Labels']: name = c.attrs['Config']['Labels']['vent.repo'].split( '/')[-1] + ': ' + name ports = c.attrs['NetworkSettings']['Ports'] p = [] port_num = 1 for port in ports: if ports[port]: try: service_str = '' if 'uri_prefix' in uris[port_num]: service_str += uris[port_num]['uri_prefix'] host = ports[port][0]['HostIp'] if services_uri[0] and host == '0.0.0.0': host = services_uri[1] service_str += host + ':' service_str += ports[port][0]['HostPort'] if 'uri_postfix' in uris[port_num]: service_str += uris[port_num]['uri_postfix'] uri_creds = '' if 'uri_user' in uris[port_num]: uri_creds += ' user:' uri_creds += uris[port_num]['uri_user'] if 'uri_pw' in uris[port_num]: uri_creds += ' pw:' uri_creds += uris[port_num]['uri_pw'] if uri_creds: service_str += ' - (' + uri_creds + ' )' p.append(service_str) except Exception as e: # pragma: no cover logger.info('No services defined for ' + str(name) + ' with exposed port ' + str(port_num) + ' because: ' + str(e)) port_num += 1 if p and name: services.append((name, p)) logger.info(services) # look for external services else: ext_tools = template.section('external-services')[1] for ext_tool in ext_tools: try: name = ext_tool[0].lower() p = [] settings_dict = json.loads(ext_tool[1]) if ('locally_active' in settings_dict and settings_dict['locally_active'] == 'no'): # default protocol to display will be http protocol = 'http' ip_address = '' port = '' for setting in settings_dict: if setting == 'ip_address': ip_address = settings_dict[setting] if setting == 'port': port = settings_dict[setting] if setting == 'protocol': protocol = settings_dict[setting] p.append(protocol + '://' + ip_address + ':' + port) if p and name: services.append((name, p)) except Exception: # pragma: no cover p = None except Exception as e: # pragma: no cover logger.error('Could not get services ' + str(e)) return services
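This newer Services variant expects per-port URI hints as numbered container labels (uri_prefix1, uri_user1, and so on, indexed by exposed-port ordinal); a hedged sketch of that grouping step (label values are illustrative):

def group_uri_labels(labels):
    """Group uri_* labels by their trailing port index, mirroring Services()."""
    uris = {}
    for label, value in labels.items():
        if label.startswith('uri') and label[-1].isdigit():
            uris.setdefault(int(label[-1]), {})[label[:-1]] = value
    return uris

# example: group_uri_labels({'uri_prefix1': 'http://', 'uri_user1': 'admin', 'uri_pw1': 's3cret'})
# -> {1: {'uri_prefix': 'http://', 'uri_user': 'admin', 'uri_pw': 's3cret'}}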
def prep_start(self, repo=None, name=None, groups=None, enabled="yes", branch="master", version="HEAD", run_build=False): """ Start a set of tools that match the parameters given, if no parameters are given, start all installed tools on the master branch at verison HEAD that are enabled """ args = locals() self.logger.info("Starting: prep_start") self.logger.info("Arguments: "+str(args)) status = (True, None) tool_dict = {} try: del args['run_build'] options = ['name', 'namespace', 'built', 'groups', 'path', 'image_name', 'branch', 'version'] vent_config = Template(template=self.vent_config) files = vent_config.option('main', 'files') sections, template = self.plugin.constraint_options(args, options) for section in sections: # initialize needed vars template_path = os.path.join(sections[section]['path'], 'vent.template') container_name = sections[section]['image_name'].replace(':','-') container_name = container_name.replace('/','-') image_name = sections[section]['image_name'] # checkout the right version and branch of the repo self.plugin.branch = branch self.plugin.version = version cwd = os.getcwd() self.logger.info("current directory is: "+str(cwd)) os.chdir(os.path.join(sections[section]['path'])) status = self.plugin.checkout() self.logger.info(status) os.chdir(cwd) if run_build: status = self.build(name=sections[section]['name'], groups=groups, enabled=enabled, branch=branch, version=version) self.logger.info(status) # set docker settings for container vent_template = Template(template_path) status = vent_template.section('docker') self.logger.info(status) tool_dict[container_name] = {'image':image_name, 'name':container_name} if status[0]: for option in status[1]: options = option[1] # check for commands to evaluate if '`' in options: cmds = options.split('`') # TODO this probably needs better error checking to handle mismatched `` if len(cmds) > 2: i = 1 while i < len(cmds): try: cmds[i] = subprocess.check_output(shlex.split(cmds[i]), stderr=subprocess.STDOUT, close_fds=True).strip() except Exception as e: # pragma: no cover self.logger.error("unable to evaluate command specified in vent.template: "+str(e)) i += 2 options = "".join(cmds) # store options set for docker try: tool_dict[container_name][option[0]] = ast.literal_eval(options) except Exception as e: # pragma: no cover self.logger.error("unable to store the options set for docker: "+str(e)) tool_dict[container_name][option[0]] = options # get temporary name for links, etc. 
status = vent_template.section('info') self.logger.info(status) plugin_config = Template(template=self.plugin.manifest) status, plugin_sections = plugin_config.sections() self.logger.info(status) for plugin_section in plugin_sections: status = plugin_config.option(plugin_section, "link_name") self.logger.info(status) image_status = plugin_config.option(plugin_section, "image_name") self.logger.info(image_status) if status[0] and image_status[0]: cont_name = image_status[1].replace(':','-') cont_name = cont_name.replace('/','-') if cont_name not in tool_dict: tool_dict[cont_name] = {'image':image_status[1], 'name':cont_name, 'start':False} tool_dict[cont_name]['tmp_name'] = status[1] # add extra labels if 'labels' not in tool_dict[container_name]: tool_dict[container_name]['labels'] = {} tool_dict[container_name]['labels']['vent'] = Version() tool_dict[container_name]['labels']['vent.namespace'] = sections[section]['namespace'] tool_dict[container_name]['labels']['vent.branch'] = branch tool_dict[container_name]['labels']['vent.version'] = version tool_dict[container_name]['labels']['vent.name'] = sections[section]['name'] if 'groups' in sections[section]: # add labels for groups tool_dict[container_name]['labels']['vent.groups'] = sections[section]['groups'] # send logs to syslog if 'syslog' not in sections[section]['groups'] and 'core' in sections[section]['groups']: tool_dict[container_name]['log_config'] = {'type':'syslog', 'config': {'syslog-address':'tcp://0.0.0.0:514', 'syslog-facility':'daemon', 'tag':'core'}} if 'syslog' not in sections[section]['groups']: tool_dict[container_name]['log_config'] = {'type':'syslog', 'config': {'syslog-address':'tcp://0.0.0.0:514', 'syslog-facility':'daemon', 'tag':'plugin'}} # mount necessary directories if 'files' in sections[section]['groups']: if 'volumes' in tool_dict[container_name]: tool_dict[container_name]['volumes'][self.plugin.path_dirs.base_dir[:-1]] = {'bind': '/vent', 'mode': 'ro'} else: tool_dict[container_name]['volumes'] = {self.plugin.path_dirs.base_dir[:-1]: {'bind': '/vent', 'mode': 'ro'}} if files[0]: tool_dict[container_name]['volumes'][files[1]] = {'bind': '/files', 'mode': 'ro'} else: tool_dict[container_name]['log_config'] = {'type':'syslog', 'config': {'syslog-address':'tcp://0.0.0.0:514', 'syslog-facility':'daemon', 'tag':'plugin'}} # add label for priority status = vent_template.section('settings') self.logger.info(status) if status[0]: for option in status[1]: if option[0] == 'priority': tool_dict[container_name]['labels']['vent.priority'] = option[1] # only start tools that have been built if sections[section]['built'] != 'yes': del tool_dict[container_name] # check and update links, volumes_from, network_mode for container in tool_dict.keys(): if 'links' in tool_dict[container]: for link in tool_dict[container]['links']: for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == link: tool_dict[container]['links'][tool_dict[c]['name']] = tool_dict[container]['links'].pop(link) if 'volumes_from' in tool_dict[container]: tmp_volumes_from = tool_dict[container]['volumes_from'] tool_dict[container]['volumes_from'] = [] for volumes_from in list(tmp_volumes_from): for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == volumes_from: tool_dict[container]['volumes_from'].append(tool_dict[c]['name']) tmp_volumes_from.remove(volumes_from) tool_dict[container]['volumes_from'] += tmp_volumes_from if 'network_mode' in tool_dict[container]: if 
tool_dict[container]['network_mode'].startswith('container:'): network_c_name = tool_dict[container]['network_mode'].split('container:')[1] for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c] and tool_dict[c]['tmp_name'] == network_c_name: tool_dict[container]['network_mode'] = 'container:'+tool_dict[c]['name'] # remove tmp_names for c in tool_dict.keys(): if 'tmp_name' in tool_dict[c]: del tool_dict[c]['tmp_name'] # remove containers that shouldn't be started for c in list(tool_dict.keys()): if 'start' in tool_dict[c] and not tool_dict[c]['start']: del tool_dict[c] status = (True, tool_dict) except Exception as e: self.logger.error("prep_start failed with error: "+str(e)) status = (False, e) self.logger.info("Status of prep_start: "+str(status)) self.logger.info("Finished: prep_start") return status
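prep_start hands the tool_dict it builds to start(), where each entry roughly maps onto docker-py container run keyword arguments; an illustrative (not authoritative) entry might look like the following, with paths and label values as placeholders:

# keys are sanitized image names; values feed the eventual container start call
example_tool_dict = {
    'cyberreboot-vent-file-drop-master': {
        'image': 'cyberreboot/vent-file-drop:master',
        'name': 'cyberreboot-vent-file-drop-master',
        'labels': {'vent': '0.0.0',  # placeholder for Version()
                   'vent.namespace': 'cyberreboot/vent',
                   'vent.branch': 'master',
                   'vent.version': 'HEAD',
                   'vent.name': 'file_drop',
                   'vent.groups': 'core,files'},
        'log_config': {'type': 'syslog',
                       'config': {'syslog-address': 'tcp://0.0.0.0:514',
                                  'syslog-facility': 'daemon',
                                  'tag': 'core'}},
        'volumes': {'/path/to/vent': {'bind': '/vent', 'mode': 'ro'}},
    },
}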