Example #1
def ninja_info( w, build_dir ):

  build_id      = build_dir.split('-')[0]              # <- first number
  step_name     = '-'.join( build_dir.split('-')[1:])  # <- remainder
  target        = 'info-' + build_id

  command = get_top_dir() + '/mflowgen/scripts/mflowgen-letters' \
      + ' -c -t ' + step_name

  command = command + ' && ' + get_top_dir() \
      + '/mflowgen/scripts/mflowgen-info'    \
      + ' -y .mflowgen/' + build_dir + '/configure.yml'

  w.rule(
    name        = target,
    description = 'List info for the step',
    command     = command
  )
  w.newline()

  w.build(
    outputs = target,
    rule    = target,
  )
  w.newline()
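
For context, w here is a ninja-syntax writer. A minimal usage sketch, assuming the stock ninja_syntax module that ships with ninja and a hypothetical build directory name:

import ninja_syntax

with open( 'build.ninja', 'w' ) as fd:
  w = ninja_syntax.Writer( fd )
  ninja_info( w, '4-synopsys-dc-synthesis' )  # '4' becomes the info-4 target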
Example #2
def ninja_graph_detailed( w, build_dirs ):

  build_dirs_commas = ','.join( build_dirs )

  python_graph_cmd = ' '.join([
    get_top_dir() + '/mflowgen/scripts/mflowgen-graph',
    '-t ' + build_dirs_commas,
    '-g .graph.dot',
    '-o .graph.subgraph.dot',
  ])

  command = ' && '.join([
    'ninja -t graph > .graph.dot',
    python_graph_cmd,
    'dot -Tps2 .graph.subgraph.dot > .graph.ps2',
    'ps2pdf .graph.ps2 graph.pdf',
  ])

  w.rule(
    name        = 'graph',
    description = 'graph: Generating the build graph',
    command     = command,
  )
  w.newline()

  w.build(
    outputs = 'graph',
    rule    = 'graph',
  )
  w.newline()
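
For reference, the rule above chains four commands: dump the full ninja build graph, extract the subgraph for the requested build directories, render it with Graphviz, and convert the result to a PDF. Illustratively, for two hypothetical build dirs (with $TOP standing in for get_top_dir()):

# Illustrative only -- what `command` evaluates to for
# build_dirs = ['0-info', '1-synth']:
command = (
  'ninja -t graph > .graph.dot'
  ' && $TOP/mflowgen/scripts/mflowgen-graph -t 0-info,1-synth'
  ' -g .graph.dot -o .graph.subgraph.dot'
  ' && dot -Tps2 .graph.subgraph.dot > .graph.ps2'
  ' && ps2pdf .graph.ps2 graph.pdf'
)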
Example #3
def make_info( w, build_dir ):

  build_id      = build_dir.split('-')[0]              # <- first number
  step_name     = '-'.join( build_dir.split('-')[1:])  # <- remainder
  target        = 'info-' + build_id

  template_str  = '.PHONY: {target}\n'
  template_str += '\n'
  template_str += '{target}:\n'
  template_str += '	{command}\n'

  command = '@' + get_top_dir() + '/mflowgen/scripts/mflowgen-letters' \
      + ' -c -t ' + step_name

  command = command + ' && ' + get_top_dir() \
      + '/mflowgen/scripts/mflowgen-info'    \
      + ' -y .mflowgen/' + build_dir + '/configure.yml'

  w.write( template_str.format( target=target, command=command ) )
  w.newline()
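
Since this writer only needs write() and newline(), a stand-in is enough to preview the emitted Makefile fragment. A sketch (the StdoutWriter class and the build directory are assumptions for illustration):

import sys

class StdoutWriter:
  # Minimal stand-in for the make-backend writer used above
  def write( self, text ):
    sys.stdout.write( text )
  def newline( self ):
    sys.stdout.write( '\n' )

make_info( StdoutWriter(), '4-synopsys-dc-synthesis' )

This prints a .PHONY target named info-4 whose recipe runs mflowgen-letters and then mflowgen-info on the step's configure.yml.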
Example #4
def make_runtimes( w ):

  template_str  = '.PHONY: runtimes\n'
  template_str += '\n'
  template_str += 'runtimes:\n'
  template_str += '	{command}\n'

  command = '@' + get_top_dir() + '/mflowgen/scripts/mflowgen-runtimes'

  w.write( template_str.format( command=command ) )
  w.newline()
Example #5
def make_status( w, steps ):

  steps_comma_separated = ','.join( steps )

  template_str  = '.PHONY: status\n'
  template_str += '\n'
  template_str += 'status:\n'
  template_str += '	{command}\n'

  command = '@' + get_top_dir() + '/mflowgen/scripts/mflowgen-status' \
            ' --backend make -s ' + steps_comma_separated

  w.write( template_str.format( command=command ) )
  w.newline()
Example #6
def ninja_runtimes( w ):

  w.rule(
    name        = 'runtimes',
    description = 'runtimes: Listing runtimes for each step',
    command     = get_top_dir() + '/mflowgen/scripts/mflowgen-runtimes',
  )
  w.newline()

  w.build(
    outputs = 'runtimes',
    rule    = 'runtimes',
  )
  w.newline()
Example #7
def ninja_status( w, steps ):

  steps_comma_separated = ','.join( steps )

  w.rule(
    name        = 'status',
    description = 'status: Listing status for each step',
    command     = get_top_dir() + '/mflowgen/scripts/mflowgen-status' \
                  ' --backend ninja -s ' + steps_comma_separated
  )
  w.newline()

  w.build(
    outputs = 'status',
    rule    = 'status',
  )
  w.newline()
Example #8
  def __init__( s ):

    s._edges_i = {}
    s._edges_o = {}
    s._steps   = {}

    # System paths to search for ADKs (analogous to Python's sys.path)
    #
    # The contents of the environment variable "MFLOWGEN_PATH" are
    # appended to this search path (analogous to how PYTHONPATH works)
    #

    s.sys_path = [
      get_top_dir() + '/adks',
    ]

    try:
      s.sys_path.extend( os.environ['MFLOWGEN_PATH'].split(':') )
    except KeyError:
      pass
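
Extra ADK directories can therefore be supplied through the environment, PYTHONPATH-style. A sketch (the paths are hypothetical, and Graph is the assumed name of the enclosing class):

import os

os.environ['MFLOWGEN_PATH'] = '/work/adks:/shared/team-adks'
g = Graph()  # g.sys_path now also contains both colon-separated entries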
Example #9
    def build(s):

        # Setup

        s.setup()

        # Pass useful data to the backend writer

        s.w.save(s.order, s.build_dirs, s.step_dirs)

        # Backend writer prologue

        s.w.gen_header()
        s.w.gen_prologue()

        # Keep track of build-system-specific dependency trackers

        backend_outputs = {}

        # Loop over all steps in topological order

        for i, step_name in enumerate(s.order):

            step = s.g.get_step(step_name)
            build_dir = s.build_dirs[step_name]
            build_id = s.build_ids[step_name]

            s.build_system_rules[step_name] = {}
            s.build_system_deps[step_name] = {}

            backend_outputs[step_name] = {}

            # Use the backend writer to generate the step header

            s.w.gen_step_header(step_name)

            #...................................................................
            # directory
            #...................................................................

            s.w.gen_step_directory_pre()

            # Make the directory dependent on all source files

            step_template_dir = s.step_dirs[step_name]
            deps = []
            # deps = get_files_in_dir( step_template_dir )  # (disabled)

            # Remove any broken symlinks from the dependency list

            deps_filtered = []
            for f in deps:
                try:
                    os.stat(f)
                    deps_filtered.append(f)
                except OSError:
                    pass
            deps = deps_filtered

            # Check if we are going to sandbox this step or symlink it

            sandbox = step.get_sandbox()

            # Rule
            #
            # - Remove the {dst}
            # - Copy the {src} to the {dst}
            # - This rule depends on {deps}
            # - {sandbox} True (copies src dir), False (symlinks src contents)
            #

            rule = {
                'dst': build_dir,
                'src': step_template_dir,
                'deps': deps,
                'sandbox': sandbox,
            }

            # Pull in any backend dependencies

            extra_deps = set()

            for edge in s.g.get_edges_i(step_name):
                src_step_name, src_f = edge.get_src()
                for o in backend_outputs[src_step_name]['alias']:
                    extra_deps.add(o)

            extra_deps = list(extra_deps)

            # Use the backend writer to generate the rule, and then grab any
            # backend dependencies

            t = s.w.gen_step_directory(extra_deps=extra_deps, **rule)

            backend_outputs[step_name]['directory'] = t

            # Metadata for customized backends

            s.build_system_rules[step_name]['directory'] = rule

            s.build_system_deps[step_name]['directory'] = set()

            for edge in s.g.get_edges_i(step_name):
                src_step_name, src_f = edge.get_src()
                s.build_system_deps[step_name]['directory'].add(
                    (src_step_name, 'alias'))

            #...................................................................
            # collect-inputs
            #...................................................................
            # For each incoming edge, trace back and collect the input (i.e.,
            # symlink the src step's output to this step's input).

            s.w.gen_step_collect_inputs_pre()

            # Pull in any backend dependencies

            extra_deps = backend_outputs[step_name]['directory']

            # Metadata for customized backends

            s.build_system_rules[step_name]['collect-inputs'] = []

            # Use the backend writer to generate rules for each input, and then
            # grab any backend dependencies

            backend_outputs[step_name]['collect-inputs'] = []

            for edge in s.g.get_edges_i(step_name):

                src_step_name, src_f = edge.get_src()
                dst_step_name, dst_f = edge.get_dst()

                link_src = s.build_dirs[src_step_name] + '/outputs/' + src_f
                link_dst = s.build_dirs[dst_step_name] + '/inputs/' + dst_f

                # Rule
                #
                # - Symlink the {src} to the {dst}
                # - This rule depends on {deps}
                #

                rule = {
                    'dst': link_dst,
                    'src': link_src,
                    'deps': [],
                }

                t = s.w.gen_step_collect_inputs(extra_deps=extra_deps, **rule)

                backend_outputs[step_name]['collect-inputs'] += t

                s.build_system_rules[step_name]['collect-inputs'].append(rule)

            # Metadata for customized backends

            s.build_system_deps[step_name]['collect-inputs'] = set()

            s.build_system_deps[step_name]['collect-inputs'].add(
                (step_name, 'directory'))

            #...................................................................
            # execute
            #...................................................................
            # Executing the step just involves running the commands script saved
            # in the hidden metadata directory.

            s.w.gen_step_execute_pre()

            # Outputs and commands

            outputs = [ build_dir + '/outputs/' + f
                        for f in step.all_outputs_execute() ]

            if not outputs:
                outputs = [build_dir + '/execute-phony']
                phony = True
            else:
                phony = False

            meta_build_dir = s.metadata_dir + '/' + build_dir
            run_script = meta_build_dir + '/' + s.mflowgen_run
            debug_script = meta_build_dir + '/' + s.mflowgen_debug

            precond_script = meta_build_dir + '/' + s.mflowgen_precond
            postcond_script = meta_build_dir + '/' + s.mflowgen_postcond

            commands = ' && '.join([
              # Step banner in big letters
              get_top_dir() \
                  + '/mflowgen/scripts/mflowgen-letters -c -t ' + step_name,
              # Copy the command script to the build_dir
              'chmod +x {}'.format( run_script ),
              'cp -f {} {}'.format( run_script, build_dir ),
              # Copy the debug script to the build_dir if it exists
              'if [[ -e ' + debug_script + ' ]]; then' \
                  + ' chmod +x {} &&'.format( debug_script ) \
                  + ' cp -f {} {}; fi'.format( debug_script, build_dir ),
              # Copy the precondition script to the build_dir if it exists
              'if [[ -e ' + precond_script + ' ]]; then' \
                  + ' chmod +x {} &&'.format( precond_script ) \
                  + ' cp -f {} {}; fi'.format( precond_script, build_dir ),
              # Copy the postcondition script to the build_dir if it exists
              'if [[ -e ' + postcond_script + ' ]]; then' \
                  + ' chmod +x {} &&'.format( postcond_script ) \
                  + ' cp -f {} {}; fi'.format( postcond_script, build_dir ),
              # Go into the build directory
              'cd ' + build_dir,
              # Run the precondition checker if it exists
              'if [[ -e ' + s.mflowgen_precond + ' ]]; then' \
                  + ' ./{x} || exit 1; fi'.format( x=s.mflowgen_precond ),
              # Run the commands
              './{x} 2>&1 | tee {x}.log || exit 1'.format( x=s.mflowgen_run ),
              # Return to top so backends can assume we never changed directory
              'cd ..',
            ])
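
            # Illustrative expansion (the step name and id are hypothetical,
            # and this assumes s.metadata_dir == '.mflowgen' and
            # s.mflowgen_run == 'mflowgen-run'): for build_dir
            # '4-synopsys-dc-synthesis' the joined chain is roughly:
            #
            #   $TOP/mflowgen/scripts/mflowgen-letters -c -t synopsys-dc-synthesis
            #   && chmod +x .mflowgen/4-synopsys-dc-synthesis/mflowgen-run
            #   && cp -f .mflowgen/4-synopsys-dc-synthesis/mflowgen-run 4-synopsys-dc-synthesis
            #   && if [[ -e ... ]]; then ...; fi   (debug/precond/postcond copies)
            #   && cd 4-synopsys-dc-synthesis
            #   && if [[ -e mflowgen-precond ]]; then ./mflowgen-precond || exit 1; fi
            #   && ./mflowgen-run 2>&1 | tee mflowgen-run.log || exit 1
            #   && cd ..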

            # Rule
            #
            # - Run the {command}
            # - Generate the {outputs}
            # - This rule depends on {deps}
            #

            rule = {
                'outputs': outputs,
                'command': commands,
                'deps': [],
                'phony': phony,
            }

            # Pull in any backend dependencies

            extra_deps = set()

            for o in backend_outputs[step_name]['directory']:
                extra_deps.add(o)
            for o in backend_outputs[step_name]['collect-inputs']:
                extra_deps.add(o)

            extra_deps = list(extra_deps)

            # Use the backend writer to generate the rule, and then grab any
            # backend dependencies

            t = s.w.gen_step_execute(extra_deps=extra_deps, **rule)

            backend_outputs[step_name]['execute'] = t

            # Metadata for customized backends

            s.build_system_rules[step_name]['execute'] = rule

            s.build_system_deps[step_name]['execute'] = set()

            s.build_system_deps[step_name]['execute'].add(
                (step_name, 'directory'))

            s.build_system_deps[step_name]['execute'].add(
                (step_name, 'collect-inputs'))

            #...................................................................
            # collect-outputs
            #...................................................................
            # Outputs may be tagged or untagged in the YAML configuration:
            #
            #     outputs:
            #       - file1.txt : path/to/the/data.txt     <-- tagged
            #       - file2.txt                            <-- untagged
            #
            # Tagged outputs need to be symlinked to the 'outputs' directory.
            # Untagged outputs are assumed to be already in the 'outputs'
            # directory.
            #
            # Some backend build systems may need to process the untagged
            # outputs to build dependency edges (e.g., timestamping), so in this
            # section we collect rules for both tagged and untagged outputs.

            s.w.gen_step_collect_outputs_pre()

            # Pull in any backend dependencies

            extra_deps = backend_outputs[step_name]['execute']

            # Metadata for customized backends

            s.build_system_rules[step_name]['collect-outputs'] = {
                'tagged': [],
                'untagged': [],
            }

            # Use the backend writer to generate rules for each tagged output,
            # and then grab any backend dependencies

            backend_outputs[step_name]['collect-outputs'] = []

            for o in step.all_outputs_tagged():

                link_src = build_dir + '/' + list(o.values())[0]
                link_dst = build_dir + '/outputs/' + list(o.keys())[0]

                # Rule
                #
                # - Symlink the {src} to the {dst}
                # - This rule depends on {deps}
                #

                rule = {
                    'dst': link_dst,
                    'src': link_src,
                    'deps': [],
                }

                t = s.w.gen_step_collect_outputs_tagged(extra_deps=extra_deps,
                                                        **rule)

                backend_outputs[step_name]['collect-outputs'] += t

                d = s.build_system_rules[step_name]['collect-outputs']
                d['tagged'].append(rule)

            # Do whatever is necessary to the untagged outputs

            for o in step.all_outputs_untagged():

                f = build_dir + '/outputs/' + o

                # Rule
                #
                # - Do whatever is necessary to the untagged output {f}
                # - This rule depends on {deps}
                #

                rule = {
                    'f': f,
                    'deps': [],
                }

                t = s.w.gen_step_collect_outputs_untagged(
                    extra_deps=extra_deps, **rule)

                backend_outputs[step_name]['collect-outputs'] += t

                d = s.build_system_rules[step_name]['collect-outputs']
                d['untagged'].append(rule)

            # Metadata for customized backends

            s.build_system_deps[step_name]['collect-outputs'] = set()

            s.build_system_deps[step_name]['collect-outputs'].add(
                (step_name, 'execute'))

            #...................................................................
            # post-conditions
            #...................................................................
            # Here we assert post-conditions (if any)

            s.w.gen_step_post_conditions_pre()

            # Commands

            commands = ' && '.join([
              # Go into the build directory
              'cd ' + build_dir,
              # Run the postcondition checker if it exists
              'if [[ -e ' + s.mflowgen_postcond + ' ]]; then' \
                  + ' ./{x} || exit 1; fi'.format( x=s.mflowgen_postcond ),
              # Return to top so backends can assume we never changed directory
              'cd ..',
            ])

            # Rule
            #
            # - Run the {command}
            # - This rule depends on {deps}
            #

            rule = {
                'command': commands,
                'deps': [],
            }

            # Pull in any backend dependencies

            extra_deps = set()

            for o in backend_outputs[step_name]['execute']:
                extra_deps.add(o)
            for o in backend_outputs[step_name]['collect-outputs']:
                extra_deps.add(o)

            extra_deps = list(extra_deps)

            # Use the backend writer to generate the rule, and then grab any
            # backend dependencies

            t = s.w.gen_step_post_conditions(extra_deps=extra_deps, **rule)

            backend_outputs[step_name]['post-conditions'] = t

            # Metadata for customized backends

            s.build_system_rules[step_name]['post-conditions'] = rule

            s.build_system_deps[step_name]['post-conditions'] = set()

            s.build_system_deps[step_name]['post-conditions'].add(
                (step_name, 'execute'))

            s.build_system_deps[step_name]['post-conditions'].add(
                (step_name, 'collect-outputs'))

            #...................................................................
            # alias
            #...................................................................
            # Here we create nice names for building this entire step

            s.w.gen_step_alias_pre()

            # Pull in any backend dependencies

            extra_deps = set()

            for o in backend_outputs[step_name]['execute']:
                extra_deps.add(o)
            for o in backend_outputs[step_name]['collect-outputs']:
                extra_deps.add(o)
            for o in backend_outputs[step_name]['post-conditions']:
                extra_deps.add(o)

            extra_deps = list(extra_deps)

            # Metadata for customized backends

            s.build_system_rules[step_name]['alias'] = []

            # Use the backend writer to generate rules for each input, and then
            # grab any backend dependencies

            backend_outputs[step_name]['alias'] = []

            # Rule
            #
            # - Create an alias called {alias} for this step
            # - This rule depends on {deps}
            #

            rule = {
                'alias': step_name,
                'deps': [],
            }

            t = s.w.gen_step_alias(extra_deps=extra_deps, **rule)
            backend_outputs[step_name]['alias'] += t

            s.build_system_rules[step_name]['alias'].append(rule)

            # Rule
            #
            # - Create an alias called {alias} for this step
            # - This rule depends on {deps}
            #

            rule = {
                'alias': build_id,
                'deps': [],
            }

            t = s.w.gen_step_alias(extra_deps=extra_deps, **rule)
            backend_outputs[step_name]['alias'] += t

            s.build_system_rules[step_name]['alias'].append(rule)

            # Metadata for customized backends

            s.build_system_deps[step_name]['alias'] = set()

            s.build_system_deps[step_name]['alias'].add((step_name, 'execute'))

            s.build_system_deps[step_name]['alias'].add(
                (step_name, 'collect-outputs'))

            s.build_system_deps[step_name]['alias'].add(
                (step_name, 'post-conditions'))

            #...................................................................
            # debug
            #...................................................................
            # Generate the debug commands if they are defined in the YAML.

            s.w.gen_step_debug_pre()

            debug_commands = step.get_debug_commands()

            if debug_commands:

                commands = ' && '.join([
                    'cd ' + build_dir,
                    './{x} 2>&1 | tee {x}.log'.format(x=s.mflowgen_debug)
                ])

                # Rule
                #
                # - Run the {command}
                # - Generate the {target}
                # - Use {build_id} to guarantee uniqueness
                #

                debug_target = 'debug-' + step_name

                rule = {
                    'target': debug_target,
                    'command': commands,
                    'build_id': build_id,
                }

                s.w.gen_step_debug(**rule)

                s.build_system_rules[step_name]['debug'] = [rule]

                # Rule
                #
                # - Create an alias called {alias} for this step
                # - This rule depends on {deps}
                #

                rule = {
                    'alias': 'debug-' + build_id,
                    'deps': [debug_target],
                    'extra_deps': [],
                }

                s.w.gen_step_alias(**rule)

            else:

                s.build_system_rules[step_name]['debug'] = []

        # Now that all steps are done...

        # Call the backend writer's epilogue

        s.w.gen_epilogue()
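
Taken together, the phases generated above give every step a fixed internal dependency chain. A sketch of the build_system_deps metadata recorded for a single step (the step and upstream names are hypothetical):

build_system_deps['synth'] = {
    'directory':       {('upstream-step', 'alias')},
    'collect-inputs':  {('synth', 'directory')},
    'execute':         {('synth', 'directory'), ('synth', 'collect-inputs')},
    'collect-outputs': {('synth', 'execute')},
    'post-conditions': {('synth', 'execute'), ('synth', 'collect-outputs')},
    'alias':           {('synth', 'execute'), ('synth', 'collect-outputs'),
                        ('synth', 'post-conditions')},
}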
Example #10
    def __init__(s, step_path, default=False):

        # Get the YAML file path
        #
        # If this is a default step, then we use the top-level steps directory

        s._config = {}

        if default:
            yaml_path = '/'.join(
                [get_top_dir(), 'steps', step_path, 'configure.yml'])
        else:
            yaml_path = '/'.join([step_path, 'configure.yml'])

        # Read the YAML data

        data = read_yaml(yaml_path)

        # Check that this is a valid step configuration

        assert 'name' in data.keys(), \
          'Step -- ' \
          'Step YAML must have a "name" field: {}'.format( yaml_path )

        # Remove empty inputs and outputs

        if 'inputs' in data.keys():
            if not data['inputs']:
                del data['inputs']

        if 'outputs' in data.keys():
            if not data['outputs']:
                del data['outputs']

        # Check that any tagged outputs only have one key and one value

        if 'outputs' in data.keys():
            for idx, o in enumerate(data['outputs']):
                if isinstance(o, dict):
                    assert len(o.keys()) == 1, 'Step -- Invalid output'
                    assert len(o.values()) == 1, 'Step -- Invalid output'

        # If commands are missing or empty, replace them with 'true' (a no-op)

        if 'commands' not in data.keys():
            data['commands'] = ['true']

        if data['commands'] == [] or data['commands'] is None:
            data['commands'] = ['true']

        # Make sure we read the commands as strings
        #
        # - A shell command of 'true' mistakenly turns into a Python boolean,
        #   so convert it back into a (lowercase) string.
        #

        assert type( data['commands'] ) == list, \
          'Step -- YAML "commands" must be a list: {}'.format( yaml_path )

        for i, c in enumerate(data['commands']):
            if isinstance(c, bool):
                data['commands'][i] = str(c).lower()

        # Replace any output tag shorthands with the real files
        #
        # So this configuration YAML:
        #
        #     outputs:
        #     - foo1.txt
        #     - foo2.txt
        #     - ~: results/1/2/3/data.txt
        #
        # Turns into this:
        #
        #     outputs:
        #     - foo1.txt
        #     - foo2.txt
        #     - data.txt: results/1/2/3/data.txt
        #

        if 'outputs' in data.keys():
            for idx, o in enumerate(data['outputs']):
                if isinstance(o, dict):
                    if list(o.keys())[0] is None:
                        f = list(o.values())[0]
                        data['outputs'][idx] = {os.path.basename(f): f}

        # Save additional metadata aside from the YAML data
        #
        # - Step directory -- we copy this when we instance a step in a build
        # - YAML name      -- used to generate a parameterized YAML in a build
        #

        s.step_dir = \
          os.path.relpath( os.path.dirname( yaml_path ), os.getcwd() )

        data['source'] = os.path.dirname(os.path.abspath(yaml_path))

        # Save the config

        s._config.update(data)
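
A usage sketch for this constructor (the assertion messages above refer to the class as Step; the step name is hypothetical). A minimal configure.yml that passes all of the checks:

#   name: synopsys-dc-synthesis
#   outputs:
#     - design.v                  # untagged
#     - ~: results/design.sdc     # tag shorthand, expands to design.sdc
#   commands:
#     - make synth

step = Step('synopsys-dc-synthesis', default=True)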