def __init__( s ):

    # Valid commands

    s.commands = [
        'init',
        'link',
        'list',
        'push',
        'pull',
        'pop',
        'drop',
        'help',
    ]

    # Read YAML: Grab link path from hidden YAML

    s.link_path_yaml = '.mflowgen.stash.yml'

    try:
        data = read_yaml( s.link_path_yaml )
        s.link_path = data[ 'path' ]
    except Exception:
        s.link_path = ''

    # Read YAML: Grab metadata about the linked stash (e.g., stash hashes,
    # authors, messages)

    s.stash_yaml_path = s.link_path + '/.mflowgen.stash.yml'

    try:
        s.stash = read_yaml( s.stash_yaml_path )
    except FileNotFoundError:
        s.stash = []
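# Hedged sketch of the two YAML files read above. The file contents are
# illustrative; only the 'path' key is implied by the lookup in __init__,
# and the stash location is hypothetical.
#
#     # ./.mflowgen.stash.yml -- hidden link file in the working directory
#     path: /example/shared/stash
#
#     # <path>/.mflowgen.stash.yml -- metadata for the linked stash, read
#     # into s.stash as a list of entries (hashes, authors, messages)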
def find_construct_path(s, design, update):

    # Check for --update first

    if update:
        try:
            data = read_yaml('.mflowgen.yml')  # get metadata
            construct_path = data['construct']
        except Exception:
            print()
            print(bold('Error:'), 'No pre-existing build in current',
                  'directory for running --update')
            print()
            sys.exit(1)
        return construct_path

    # Search in the design directory

    if not os.path.exists(design):
        print()
        print(bold('Error:'), 'Directory not found at path',
              '"{}"'.format(design))
        print()
        sys.exit(1)

    yaml_path = os.path.abspath(design + '/.mflowgen.yml')

    if not os.path.exists(yaml_path):
        construct_path = design + '/construct.py'
    else:
        data = read_yaml(yaml_path)
        try:
            construct_path = data['construct']
        except KeyError:
            raise KeyError(
                'YAML file "{}" must have key "construct"'.format(
                    yaml_path))
        if not construct_path.startswith('/'):  # check if absolute path
            construct_path = design + '/' + construct_path

    construct_path = os.path.abspath(construct_path)

    if not os.path.exists(construct_path):
        raise ValueError('Construct script not found at "{}"'.format(
            construct_path))

    return construct_path
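# Minimal usage sketch for the resolution rules above (the design directory
# and YAML contents are hypothetical):
#
#     s.find_construct_path( design='designs/GcdUnit', update=False )
#
# - with no designs/GcdUnit/.mflowgen.yml, this resolves to the absolute
#   path of designs/GcdUnit/construct.py
# - with a .mflowgen.yml containing "construct: flow/construct-chip.py",
#   the relative path is joined against the design directory instead
# - with update=True, the 'construct' key cached in ./.mflowgen.yml by a
#   previous build is reused and the design argument is not consulted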
def save_construct_path(s, construct_path):

    yaml_path = '.mflowgen.yml'

    try:
        data = read_yaml(yaml_path)
    except Exception:
        data = {}

    data['construct'] = construct_path

    write_yaml(data=data, path=yaml_path)
def dump_assertion_check_scripts(step_name, dir_name):

    yaml_path = dir_name + '/configure.yml'
    data = read_yaml(yaml_path)

    # Look at both preconditions and postconditions

    assertion_types = ['preconditions', 'postconditions']

    for t in assertion_types:

        # If no pre/post conditions are defined, continue

        try:
            data[t]
        except KeyError:
            continue

        if not data[t]:
            continue

        # Each entry in the list specifies Python statement(s) that represent
        # a single test. We aggregate all the tests into the "tests_str"
        # string and dump it into the script all at once. We also collect any
        # explicit pytest scripts into the "pyfiles" list.

        tests_str = ''
        pyfiles = []

        for i, entry in enumerate(data[t]):

            # If the entry specifies a pytest file, grab it

            if type(entry) == dict:
                try:
                    pyfile = "'{}'".format(entry['pytest'])
                except KeyError:
                    msg = '\nUnsupported assertion of type "dict" ' + \
                          'in step "{}". '.format( step_name ) + \
                          'If there is a colon in this assertion, you must ' + \
                          'put quotes around the entire string ' + \
                          'and properly escape the special characters inside ' + \
                          'with YAML syntax:\n\n- {}\n'.format( entry )
                    print(msg)
                    raise
                pyfiles.append(pyfile)

            # Otherwise, treat it as normal Python and wrap it up as a function
            # that pytest can run

            else:
                try:
                    compile(entry, '', 'exec')  # make sure it compiles as Python
                except Exception as e:
                    # if it does not compile, complain nicely
                    print()
                    print('Exception in {} #{} for step {}'.format(
                        t[:-1], i, step_name))
                    print()
                    print(' >>> ' + entry)
                    print()
                    raise e

                # Generate a function that pytest can run
                #
                # - Wrap the test in a try except to rewrite the Assertion message
                #

                nchars = 0  # first N chars of entry appear in the function name
                func_name = str(i) + '_' + sanitize(entry[:nchars])
                code = indent(entry, 4)
                #code_oneline = ' -> '.join( entry.splitlines() )

                tests_str += template_pytest_str.format(name=func_name, code=code)

        # Dump the pytest functions to file by filling in a template

        fpath = dir_name + '/mflowgen-check-' + t + '.py'

        with open(fpath, 'w') as fd:
            fd.write(
                template_pytest_file.format(
                    step=step_name,
                    tests=tests_str,
                    check_type=t,
                    gen=os.path.abspath(__file__).rstrip('c'),
                    pyfiles=', '.join(pyfiles)))

        # Make it executable

        os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IEXEC)
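# Hedged example of the configure.yml fields this function consumes. The
# specific assertions and file name below are illustrative only:
#
#     postconditions:
#     - assert os.path.exists( 'outputs/design.v' )   # plain Python statement
#     - pytest: inputs/test_timing.py                 # explicit pytest script
#
# Plain-string entries are compiled and wrapped into generated pytest
# functions in mflowgen-check-postconditions.py; dict entries with a
# 'pytest' key are passed through in the "pyfiles" list.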
def __init__(s, step_path, default=False):

    # Get the YAML file path
    #
    # If this is a default step, then we use the top-level steps directory

    s._config = {}

    if default:
        yaml_path = '/'.join(
            [get_top_dir(), 'steps', step_path, 'configure.yml'])
    else:
        yaml_path = '/'.join([step_path, 'configure.yml'])

    # Read the YAML data

    data = read_yaml(yaml_path)

    # Check that this is a valid step configuration

    assert 'name' in data.keys(), \
        'Step -- ' \
        'Step YAML must have a "name" field: {}'.format( yaml_path )

    # Remove empty inputs and outputs

    if 'inputs' in data.keys():
        if not data['inputs']:
            del (data['inputs'])

    if 'outputs' in data.keys():
        if not data['outputs']:
            del (data['outputs'])

    # Check that any tagged outputs only have one key and one value

    if 'outputs' in data.keys():
        for idx, o in enumerate(data['outputs']):
            if type(o) == dict:
                assert len(o.keys()) == 1, 'Step -- Invalid output'
                assert len(o.values()) == 1, 'Step -- Invalid output'

    # If commands are missing or empty, replace with 'true' (a shell no-op)

    if 'commands' not in data.keys():
        data['commands'] = ['true']

    if 'commands' in data.keys():
        if data['commands'] == [] or data['commands'] is None:
            data['commands'] = ['true']

    # Make sure we read the commands as strings
    #
    # - A shell command of 'true' mistakenly turns into a python boolean,
    #   so convert it back into a (lowercase) string..
    #

    assert type( data['commands'] ) == list, \
        'Step -- YAML "commands" must be a list: {}'.format( yaml_path )

    for i, c in enumerate(data['commands']):
        if type(c) == bool:
            data['commands'][i] = str(c).lower()

    # Replace any output tag shorthands with the real files
    #
    # So this configuration YAML:
    #
    #     outputs:
    #     - foo1.txt
    #     - foo2.txt
    #     - ~: results/1/2/3/data.txt
    #
    # Turns into this:
    #
    #     outputs:
    #     - foo1.txt
    #     - foo2.txt
    #     - data.txt: results/1/2/3/data.txt
    #

    if 'outputs' in data.keys():
        for idx, o in enumerate(data['outputs']):
            if type(o) == dict:
                # Wrap the dict views in list() since they are not
                # subscriptable in Python 3
                if list(o.keys())[0] is None:
                    f = list(o.values())[0]
                    data['outputs'][idx] = {os.path.basename(f): f}

    # Save additional metadata aside from the YAML data
    #
    # - Step directory -- we copy this when we instance a step in a build
    # - YAML name -- used to generate a parameterized YAML in a build
    #

    s.step_dir = \
        os.path.relpath( os.path.dirname( yaml_path ), os.getcwd() )

    data['source'] = os.path.dirname(os.path.abspath(yaml_path))

    # Save the config

    s._config.update(data)
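# Hedged example of a minimal configure.yml accepted by this constructor.
# The step name, files, and command are illustrative only; the keys are the
# ones checked above:
#
#     name: synth-example
#     inputs:
#     - design.v
#     outputs:
#     - ~: results/netlist.v     # expands to "netlist.v: results/netlist.v"
#     commands:
#     - echo "running synthesis"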
def launch_run(s, demo, design, backend):

    # construct_path -- Locate the construct script
    #
    # - Read the .mflowgen.yml metadata in the design directory
    # - If it does not exist, then use the default path of "construct.py"
    #

    yaml_path = os.path.abspath(design + '/.mflowgen.yml')

    if not os.path.exists(yaml_path):
        construct_path = design + '/construct.py'
    else:
        data = read_yaml(yaml_path)
        try:
            construct_path = data['construct']
        except KeyError:
            raise KeyError(
                'YAML file "{}" must have key "construct"'.format(
                    yaml_path))
        if not construct_path.startswith('/'):  # check if absolute path
            construct_path = design + '/' + construct_path

    construct_path = os.path.abspath(construct_path)

    if not os.path.exists(construct_path):
        raise ValueError('Construct script not found at "{}"'.format(
            construct_path))

    # Import the graph for this design

    c_dirname = os.path.dirname(construct_path)
    c_basename = os.path.splitext(os.path.basename(construct_path))[0]

    sys.path.append(c_dirname)

    try:
        construct = importlib.import_module(c_basename)
    except ImportError:
        raise ImportError(
            'No module named construct in "{}"'.format(construct_path))

    # Construct the graph

    g = construct.construct()

    # Generate the build files (e.g., Makefile) for the selected backend
    # build system

    if backend == 'make':
        backend_cls = MakeBackend
    elif backend == 'ninja':
        backend_cls = NinjaBackend

    b = BuildOrchestrator(g, backend_cls)
    b.build()

    # Done

    list_target = backend + " list"
    status_target = backend + " status"

    print("Targets: run \"" + list_target +
          "\" and \"" + status_target + "\"")
    print()
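# Hedged sketch of the construct script that launch_run imports. The only
# contract used above is a module-level construct() returning the graph
# object handed to BuildOrchestrator; the import path and step wiring below
# are illustrative assumptions, not a canonical template.
#
#     # construct.py
#     from mflowgen.components import Graph, Step
#
#     def construct():
#         g = Graph()
#         ...               # add and connect steps here
#         return g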