def template(template=None, connection_file=None, project_name=None):
    """
    List templates and possibly create one for the user.
    Use the connection_file and project_name flags to set these options.
    Otherwise, only the template name is required.
    """
    if not os.path.isdir('connections'):
        os.mkdir('connections')
    template_source = 'connection_templates.py'
    if sys.version_info < (3, 0):
        templates = {}
        execfile(os.path.join(os.path.dirname(__file__), template_source), templates)
        for key in [i for i in templates if not re.match('^template_', i)]:
            templates.pop(key)
        asciitree({'templates': [re.match('^template_(.+)$', k).group(1)
            for k in templates.keys()]})
    # the python 3 path is not implemented yet
    else:
        raise Exception('dev')
    #---if the user requests a template, write it for them
    if not template and not connection_file:
        print('[NOTE] rerun with e.g. `make template <template_name>` to make a new '
            'connection with the same name as the template. you can also supply keyword '
            'arguments for the connection_file and project name')
    elif connection_file and not template:
        raise Exception('you must supply a template_name')
    elif template not in templates and 'template_%s' % template not in templates:
        raise Exception('cannot find template "%s"' % template)
    elif not connection_file and template:
        connection_file = template + '.yaml'
    #---write the template
    if template:
        fn = os.path.join('connections', connection_file)
        if not re.match(r'^.+\.yaml$', fn):
            fn = fn + '.yaml'
        with open(fn, 'w') as fp:
            template_text = templates.get(template, templates['template_%s' % template])
            if project_name:
                template_text = re.sub(r'^([^\s]+):', '%s:' % project_name,
                    template_text, flags=re.M)
            fp.write(template_text)
        print('[NOTE] wrote a new template to %s' % fn)
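#---illustration (not part of the source): a minimal sketch of calling template()
#---directly, assuming a hypothetical "template_demo" entry in connection_templates.py;
#---this writes connections/demo.yaml with its top-level key renamed to the project
#---(presumably `make template demo project_name="myproject"` from the shell)
def template_usage_example():
    template('demo', project_name='myproject')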
def avail(config=None, mods=None, **kwargs):
    """List available tests."""
    build_dn = kwargs.pop('build', 'builds')
    toc_fn = kwargs.pop('toc_fn', 'docker.json')
    username = kwargs.pop('username', container_user)
    config_dict = read_config()
    if config is None:
        config = config_dict.get('docks_config', 'docker_config.py')
    if kwargs:
        raise Exception('unprocessed kwargs %s' % kwargs)
    # get the interpreted docker configuration
    instruct = interpret_docker_instructions(config=config, mods=mods)
    from datapack import asciitree
    # listify the keys so the result displays and serializes cleanly on python 3
    tests_these = dict(tests=list(instruct.get('tests', {}).keys()))
    asciitree(tests_these)
    return tests_these
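# illustration (not part of the source): avail() both prints and returns the
# test names; a minimal sketch, assuming a docker configuration at docker_config.py
def avail_usage_example():
    tests = avail(config='docker_config.py')
    # tests has the form {'tests': [...test names...]}
    return tests['tests']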
def __init__(self, **kwargs):
    self.root = 'calcs'
    self.ledger_fn = kwargs.pop('ledger', 'audit.yaml')
    self.debug = kwargs.pop('debug', False)
    if kwargs:
        raise Exception('unprocessed kwargs %s' % kwargs)
    status('welcome to the auditor')
    self.ledger = os.path.join(self.root, self.ledger_fn)
    if not os.path.isfile(self.ledger):
        raise Exception('cannot find %s' % self.ledger)
    with open(self.ledger) as fp:
        self.raw = yaml.load(fp.read())
    # print everything
    asciitree(self.raw)
    self.interpret()
    if self.debug:
        import ipdb
        ipdb.set_trace()
def __init__(self, refresh=False):
    """
    Create a factory environment from instructions in the config,
    and set it up or refresh it if necessary.
    """
    #---create required folders for the factory
    for fn in self.required_folders:
        if not os.path.isdir(fn):
            os.mkdir(fn)
    #---get a copy of the configuration
    self.config = read_config()
    self.timestamp = self.config.get('setup_stamp', None)
    kind = self.config.get('species', None)
    if kind not in self.meta:
        msg = (
            'It looks like this is your first time. '
            'To get started with the factory, you have to choose a virtual environment. '
            'Even if you have lots of dank packages installed on your linux box, we still use (at least) '
            'a virtualenv to make sure you have all of the correct dependencies. We recommend '
            '`virtualenv` for users with lots of required packages, `virtualenv_sandbox` for those with '
            'major dependency issues (looking at you, Debian), and `anaconda` for advanced users who '
            'want that sweet, sweet 3D viz and protection against totally screwing up your window '
            'manager.')
        msg_instruct = ('Before continuing, run `make set species <name>` '
            'where the name comes from the following list:')
        print('\n' + fab('WELCOME to the FACTORY', 'cyan_black') + '\n')
        print('\n'.join(textwrap.wrap(msg, width=80)))
        print('\n' + '\n'.join(textwrap.wrap(msg_instruct, width=80)) + '\n')
        asciitree({'envs': list(self.meta.keys())})
        sys.exit(1)
    self.kind = kind
    for key in self.meta[kind]:
        self.__dict__[key] = self.meta[kind][key]
    #---make sure all requirements files are available no matter what
    do_refresh = (self.timestamp and self.check_spotchange()) or refresh
    #---environment creation is divided into two parts: first run and refreshes
    start_time = time.time()
    if not self.timestamp:
        getattr(self, self.setup_kickstart)()
    if not self.timestamp or do_refresh:
        getattr(self, self.setup_refresh)()
    if do_refresh or not self.timestamp:
        print('[NOTE] setup took %.1f minutes' % ((time.time() - start_time) / 60.))
    #---register all changes and welcome the user to the plush new environment
    self.register_finished()
    if hasattr(self, self.welcome):
        getattr(self, self.welcome)()
def docker_recap(longest=True, log=False):
    """Summarize docker compile times."""
    config = read_config()
    import json
    from datapack import asciitree
    docker_history = config.get('docker_history', {})
    keys = list(set([i[0] for i in docker_history]))
    timings = dict([(key, {}) for key in keys])
    for key in keys:
        stamps = [i[1] for i in docker_history if i[0] == key]
        timings[key]['timings'] = dict([
            (s, '%.1f min' % (docker_history[(key, s)]['total_time'] / 60.))
            for s in stamps])
        # report the per-step timings for each stamp (the original flat list
        # referenced a loop variable that was never defined)
        timings[key]['sub-timings'] = dict([
            (s, ['%s, %.1f min' % (step['name'], step['elapsed'] / 60.)
                for step in docker_history[(key, s)]['series']])
            for s in stamps])
        # compare raw times rather than formatted strings to find the longest build
        timings[key]['longest'] = '%.1f min' % (max([
            docker_history[(key, s)]['total_time'] for s in stamps]) / 60.)
    if not log:
        asciitree(timings)
    else:
        print(json.dumps(timings))
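# illustration (not part of the source): docker_recap() assumes docker_history
# maps (name, stamp) pairs to timing records; the field names below come from
# the lookups above, while the example values are invented
docker_history_example = {
    ('builder', '2019.01.01.1200'): {
        'total_time': 912.0,  # seconds for the whole build
        'series': [  # one record per build step
            {'name': 'base image', 'elapsed': 310.0},
            {'name': 'pip installs', 'elapsed': 602.0}]}}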
def inject_supervised_plot_tools(out, mode='supervised', silent=False):
    """
    Add important tools to a dictionary which is later exported to the namespace
    for plotting. This function was centralized here so that both the interactive
    header and non-interactive execution modes can use it.
    """
    import os, sys, re, collections
    work = out['work']
    #---save keys before the additions
    keys_incoming = set(out.keys())
    #---import sequence from original header.py
    from . import store
    #---distribute the workspace to the store module
    #---...we have to distribute this way, or internalize these functions
    store.work = work
    from .store import plotload, picturesave, picturesave_redacted
    from .tools import status
    if work.metadata.director.get('redacted', False):
        picturesave = picturesave_redacted
    #---handle latex and matplotlibrc here
    from config import read_config
    try:
        config = read_config()
        cwd = '.'
    #---if execution does not happen in the omnicalc root we are in interactive mode running from calcs
    except:
        cwd = '../'
        config = read_config(cwd=cwd)
    matplotlibrc_path = os.path.join(
        cwd, config.get('matplotlibrc', 'omni/plotter/matplotlibrc'))
    #---without an explicit matplotlibrc file, we check the config and then check for the latex binary
    if (os.path.basename(matplotlibrc_path) != 'matplotlibrc'
            or not os.path.isfile(matplotlibrc_path)):
        raise Exception('cannot find a file called "matplotlibrc" here: %s' % matplotlibrc_path)
    os.environ['MATPLOTLIBRC'] = os.path.abspath(
        os.path.join(os.getcwd(), os.path.dirname(matplotlibrc_path)))
    #---matplotlib is first loaded here
    import matplotlib as mpl
    if work.mpl_agg:
        mpl.use('Agg')
    import matplotlib.pyplot as plt
    #---we default to latex if it is available, otherwise we consult the config
    use_latex = config.get('use_latex', None)
    if use_latex is None:
        from distutils.spawn import find_executable
        use_latex = find_executable('latex')
    out.update(mpl=mpl, plt=plt)
    #---the plotter __init__.py sets the imports (importantly, including mpl,plt with modifications)
    from plotter.panels import panelplot, square_tiles
    from makeface import tracebacker
    from .hypothesis import hypothesis, sweeper
    from copy import deepcopy
    #---we would prefer not to import numpy here but it is important for backwards compatibility
    import numpy as np
    out.update(np=np, os=os, sys=sys, re=re)
    #---load custom functions
    out.update(plotload=plotload, picturesave=picturesave, status=status,
        panelplot=panelplot, square_tiles=square_tiles, tracebacker=tracebacker,
        hypothesis=hypothesis, sweeper=sweeper, deepcopy=deepcopy, np=np)
    #---add a plot supervisor instance and the autoplotter decorators
    from .autoplotters import PlotSupervisor, autoload, autoplot
    out.update(plotrun=PlotSupervisor(mode=mode))
    #---we use str_types frequently for python 2,3 cross-compatibility
    str_types = [str, unicode] if sys.version_info < (3, 0) else [str]
    out.update(autoload=autoload, autoplot=autoplot, str_types=str_types)
    #---custom "art director" can be useful for coordinating aesthetics for different projects
    from plotter.art_director_importer import import_art_director, protected_art_words
    #---you can set the art director in the variables or ideally in the director
    art_director = work.metadata.director.get(
        'art_director', work.metadata.variables.get('art_director', None))
    #---always set protected variables to null
    for key in protected_art_words:
        out[key] = None
    if art_director:
        #---reload the art settings if they are already loaded
        mod_name = re.match(r'^(.+)\.py$', os.path.basename(art_director)).group(1)
        #---! switched from reload to a python3-compatible method; would prefer to avoid pyc files
        #---! currently disabled on the development branch (getting "module has no attribute reload")
        try:
            import importlib
            if mod_name in sys.modules:
                importlib.reload(sys.modules[mod_name])
            art_vars = import_art_director(art_director, cwd=os.path.join(cwd, 'calcs'))
            #---unpack these into outgoing variables
            for key, val in art_vars.items():
                out[key] = val
        except:
            status('cannot reload the art director', tag='warning')
    out['_plot_environment_keys'] = list(set(out.keys()) - keys_incoming)
    #---tell the user which variables are automagically loaded
    if silent:
        return
    status('the following variables are loaded into your plot script environment', tag='note')
    from datapack import asciitree

    def key_types(obj):
        """Organize injected variables for the user."""
        if hasattr(obj, '__name__') and obj.__name__ in ['numpy']:
            return 'external'
        elif callable(obj):
            return 'function'
        elif hasattr(obj, '__class__') and obj.__class__.__name__ in [
                'WorkSpace', 'PlotSupervisor']:
            return 'instance'
        else:
            return 'variable'
    key_catalog = dict([(key, key_types(out[key])) for key in out])
    report = collections.OrderedDict()
    for name in ['variable', 'function', 'instance']:
        report[name] = collections.OrderedDict()
        items = [(key, out[key]) for key in sorted(out) if key_catalog[key] == name]
        for key, val in items:
            if type(val).__name__ == 'classobj':
                report[name][key] = '<class \'%s\'>' % val.__name__
            elif val.__class__.__name__ == 'module':
                report[name][key] = '<module \'%s\'>' % val.__name__
            elif val.__class__.__name__ == 'function':
                report[name][key] = '<function \'%s\'>' % val.__name__
            else:
                if val.__class__.__module__ in ['omnicalc', 'base.autoplotters']:
                    report[name][key] = str(val)
                elif key in ['str_types']:
                    report[name][key] = str(val)
                else:
                    report[name][key] = val
    asciitree(dict({'plot_environment': report}))
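#---illustration (not part of the source): a hypothetical calcs/plot-demo.py that
#---consumes the injected namespace; plotrun, autoload, autoplot, plotload, plt,
#---and np arrive via inject_supervised_plot_tools rather than imports, and the
#---plot name "demo" and the plotload call signature are assumptions
@autoload(plotrun)
def load():
    # stash the loaded data in a global so the plot function below can see it
    global data
    data = plotload('demo')

@autoplot(plotrun)
def plot_demo():
    plt.plot(np.arange(10))
    plt.savefig('demo.png')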
if len(argvs) == 1:
    if verbose:
        print('[NOTE] imported remotely from %s' % fn)
        print('[NOTE] added functions: %s' % (' '.join(new_funcs)))
#---prune non-callables from the list of makeface functions
for name, obj in list(makeface_funcs.items()):
    if not hasattr(obj, '__call__'):
        print('[WARNING] removing non-callable %s from makeface' % name)
        del makeface_funcs[name]
#---command aliases for usability
commands_aliases = configurator.get('commands_aliases', [])
#---environment handler
env_prepend = configurator.get('activate_env', '')
if env_prepend:
    print('[STATUS] config.py: activate environment: "%s"' % env_prepend)
if any([len(i) != 2 for i in commands_aliases]):
    raise Exception('commands_aliases must be a list of tuples that specify (target,alias) functions')
#----fails on docs.py when looking for preplist
for j, i in commands_aliases:
    if i not in makeface_funcs:
        raise Exception('cannot find target command-line function "%s" for alias "%s"' % (i, j))
    #---note that we remove the original function after making the alias to avoid redundancy
    else:
        makeface_funcs[j] = makeface_funcs.pop(i)
#---if no argument, make returns valid targets
if len(argvs) == 1:
    #---this formatting is read by the makefile to get the valid targets (please don't remove it)
    print('[STATUS] available make targets: %s' % (' '.join(makeface_funcs.keys())))
    from datapack import asciitree
    asciitree({'make targets': list(sorted(makeface_funcs.keys()))})
    print('[USAGE] `make <target> <args> <kwarg>="<val>" ...`')
else:
    makeface(*argvs[1:])
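#---illustration (not part of the source): a hypothetical commands_aliases entry
#---for config.py; note that the loop above unpacks each pair as (alias, target)
#---even though the exception text reads "(target,alias)", so `make up` here
#---would dispatch to the update() target
commands_aliases_example = [('up', 'update')]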
def __repr__(self):
    """Readable debugging."""
    asciitree({self.__class__.__name__: self.__dict__})
    return '<%s instance at 0x%x>' % (self.__class__.__name__, id(self))
def megatest(instruct, via, check=False, clear=None):
    """
    The test to end all unit tests. Run with
    `make megatest instruct=tests/megatest_v1.yaml via=logs`.
    If you ctrl+c out, then you have to remove the folder yourself
    (because some files are not written).
    !!! add a keyboard exception that cleans up
    """
    import yaml, glob, shutil
    # the test sequence comes from a separate file
    #! considered using a wildcard to get tests matching a name from avail()
    with open(instruct) as fp:
        spec = yaml.load(fp.read())
    # read a folder
    if not os.path.isdir(via):
        raise Exception('via argument %s must point to a folder with completed tests' % via)
    logs = glob.glob(os.path.join(via, '*'))
    # the custom proof-of-work structure is interpreted here
    # proof of work originated in tests/testset.py, test
    # ensure pairs of logs and scripts
    regex_log = r'^(.*?)\.log$'
    regex_script = r'^(.*?)\.script\.sh$'
    regex_raw = r'^(.*?)\.yaml$'
    regex_special_summary = '### special summary (.*?)\n'
    base_logs, base_script = [set([
        re.match(r, j).group(1)
        for j in [os.path.basename(i) for i in logs] if re.match(r, j)])
        for r in [regex_log, regex_script]]
    if base_logs != base_script:
        raise Exception('failed to pair all test records: %s' %
            list(set.symmetric_difference(base_logs, base_script)))
    test_names = list(base_logs)
    # loop over tests
    if not check and not clear:
        for name in spec['sequence']:
            name_spaceless = '_'.join(name.split())
            if name_spaceless not in test_names:
                print('[STATUS] megatest is running test %s' % name_spaceless)
                # RUN THE TEST
                # note that you can set visit below to drop in and see the container
                # without executing, which was useful for debugging the mounts
                test(*name.split(), back=True, wait=True, log=True, visit=False,
                    dump_raw_test=os.path.join(via, '%s.yaml' % name_spaceless))
            else:
                print('[STATUS] megatest is skipping test %s because it is logged' % name_spaceless)
    else:
        from datapack import asciitree
        report = {}
        print('[STATUS] status report follows')
        for name in test_names:
            with open(os.path.join(via, '%s.log' % name), 'r') as fp:
                text = fp.read()
            passed = re.search('unit test is complete', text) is not None
            report[name] = dict(passed=passed)
            # collect special instructions if passed
            try:
                with open(os.path.join(via, '%s.script.sh' % name), 'r') as fp:
                    text = fp.read()
                special = re.search(regex_special_summary, text, re.M).group(1)
                report[name]['special'] = yaml.load(special)
                try:
                    spot = re.search('^spot=(.*?)\n', text, re.M).group(1).strip()
                    report[name]['spot'] = spot
                except:
                    pass
            except:
                pass
        asciitree(report)
        if clear:
            failed = [k for k, v in report.items() if not v['passed']]
            for name in failed:
                try:
                    # predefine the target so the warning below cannot hit a NameError
                    out_dn = None
                    try:
                        out_dn = os.path.join('pier',
                            report[name]['spot'], report[name]['special']['sim_name'])
                        shutil.rmtree(out_dn)
                    except:
                        print('[WARNING] could not delete %s' % out_dn)
                    try:
                        os.system('docker rm %s' % name)
                    except:
                        pass
                    for base_fn in ['%s.log', '%s.script.sh', '%s.yaml']:
                        try:
                            os.remove(os.path.join(via, base_fn % name))
                        except:
                            pass
                except:
                    print('[WARNING] perhaps failed to clear %s' % name)
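# illustration (not part of the source): megatest only reads spec['sequence']
# from the instruct file, and each entry is whitespace-split into arguments for
# test(); a minimal tests/megatest_v1.yaml might read as follows (the test
# names are invented):
#
#   sequence:
#     - factory basic
#     - factory connect demo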