Example #1
def make(job_list, context, cq,
         echo=DefaultsToConfig('echo'),
         new_process=DefaultsToConfig('new_process'),
         recurse=DefaultsToConfig('recurse')):
    """
        Makes selected targets; or all targets if none specified.

        Options:
            make recurse=1      Recursive make: put generated jobs in
                                the queue.
            make new_process=1  Run the jobs in a new Python process.
            make echo=1         Display the stdout/stderr for the job
                                on the console.

            make new_process=1 echo=1   Not supported yet.
    """
    db = context.get_compmake_db()
    if not job_list:
        job_list = list(top_targets(db=db))

    manager = ManagerLocal(context=context, cq=cq,
                           recurse=recurse, new_process=new_process, echo=echo)
    manager.add_targets(job_list)
    manager.process()
    return raise_error_if_manager_failed(manager)
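The DefaultsToConfig defaults above read as sentinels that are resolved against compmake's configuration at call time. A minimal sketch of that pattern, using a hypothetical plain-dict config store in place of the real one:

class DefaultsToConfig(object):
    """Sentinel default: look the value up in config at call time."""

    def __init__(self, key):
        self.key = key

# Hypothetical configuration store; compmake's real one differs.
config = {'echo': False, 'new_process': False, 'recurse': False}

def resolve(value):
    """Replace a DefaultsToConfig sentinel with the configured value."""
    if isinstance(value, DefaultsToConfig):
        return config[value.key]
    return value

# resolve(DefaultsToConfig('recurse')) -> False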
Example #2
def make(job_list,
         context,
         cq,
         echo=DefaultsToConfig('echo'),
         new_process=DefaultsToConfig('new_process'),
         recurse=DefaultsToConfig('recurse')):
    """
        Makes selected targets; or all targets if none specified.

        Options:
            make recurse=1      Recursive make: put generated jobs in
                                the queue.
            make new_process=1  Run the jobs in a new Python process.
            make echo=1         Display the stdout/stderr for the job
                                on the console.

            make new_process=1 echo=1   Not supported yet.
    """
    db = context.get_compmake_db()
    if not job_list:
        job_list = list(top_targets(db=db))

    manager = ManagerLocal(context=context,
                           cq=cq,
                           recurse=recurse,
                           new_process=new_process,
                           echo=echo)
    manager.add_targets(job_list)
    manager.process()
    return raise_error_if_manager_failed(manager)
Example #3
def cloudmake(job_list, context, cq,
              n=DefaultsToConfig('multyvac_max_jobs'),
              recurse=DefaultsToConfig('recurse'),
              new_process=DefaultsToConfig('new_process'),
              echo=DefaultsToConfig('echo'),
              skipsync=False,
              rdb=True):
    """
        Multyvac backend

    """
    # TODO: check it exists
    # noinspection PyUnresolvedReferences
    import multyvac

    disable_logging_if_config(context)
    
    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))
    
    volumes = sync_data_up(context, skipsync)
    
    if rdb:
        rdb_vol, rdb_db = synchronize_db_up(context, job_list)
    else:
        rdb_vol, rdb_db = None, None
        
    publish(context, 'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MVacManager(num_processes=n,
                          context=context,
                          cq=cq,
                          recurse=recurse,
                          new_process=new_process,
                          show_output=echo,
                          volumes=volumes,
                          rdb=rdb,
                          rdb_vol=rdb_vol,
                          rdb_db=rdb_db)

    publish(context, 'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    if not skipsync:
        sync_data_down(context)

    return raise_error_if_manager_failed(manager)
Example #4
def parmake(job_list,
            context,
            cq,
            n=DefaultsToConfig('max_parallel_jobs'),
            recurse=DefaultsToConfig('recurse'),
            new_process=DefaultsToConfig('new_process'),
            echo=DefaultsToConfig('echo')):
    """
        Parallel equivalent of make.

        Uses multiprocessing.Process as a backend and a Python queue to
        communicate with the workers.

        Options:

          parmake n=10             Use 10 workers.
          parmake recurse=1        Recursive make: put generated jobs
                                   in the queue.
          parmake new_process=1    Run the jobs in a new Python process.
          parmake echo=1           Show the output of the jobs. This
                                   might slow everything down.

          parmake new_process=1 echo=1   Not supported yet.

    """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    publish(context,
            'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = PmakeManager(num_processes=n,
                           context=context,
                           cq=cq,
                           recurse=recurse,
                           new_process=new_process,
                           show_output=echo)

    publish(context,
            'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    return raise_error_if_manager_failed(manager)
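The docstring above says parmake drives multiprocessing.Process workers through a Python queue. The following is a generic sketch of that worker/queue pattern, not PmakeManager's actual implementation; worker(), run_jobs(), and the poison-pill protocol are illustrative names only:

import multiprocessing

def worker(job_queue, result_queue):
    # Pull job ids until the poison pill (None) arrives.
    for job_id in iter(job_queue.get, None):
        result_queue.put((job_id, 'done'))

def run_jobs(job_ids, n=4):
    job_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker,
                                     args=(job_queue, result_queue))
             for _ in range(n)]
    for p in procs:
        p.start()
    for job_id in job_ids:
        job_queue.put(job_id)
    for _ in procs:
        job_queue.put(None)  # one poison pill per worker
    results = [result_queue.get() for _ in job_ids]
    for p in procs:
        p.join()
    return results

if __name__ == '__main__':
    print(run_jobs(['a', 'b', 'c']))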
Example #5
def parmake(job_list, context, cq,
            n=DefaultsToConfig('max_parallel_jobs'),
            recurse=DefaultsToConfig('recurse'),
            new_process=DefaultsToConfig('new_process'),
            echo=DefaultsToConfig('echo')):
    """
        Parallel equivalent of make.

        Uses multiprocessing.Process as a backend and a Python queue to
        communicate with the workers.

        Options:

          parmake n=10             Use 10 workers.
          parmake recurse=1        Recursive make: put generated jobs
                                   in the queue.
          parmake new_process=1    Run the jobs in a new Python process.
          parmake echo=1           Show the output of the jobs. This
                                   might slow everything down.

          parmake new_process=1 echo=1   Not supported yet.

    """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    publish(context, 'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = PmakeManager(num_processes=n,
                           context=context,
                           cq=cq,
                           recurse=recurse,
                           new_process=new_process,
                           show_output=echo)

    publish(context, 'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    return raise_error_if_manager_failed(manager)
Example #6
def make(job_list):
    '''Makes selected targets; or all targets if none specified.'''
    job_list = list(job_list)
    
    if not job_list:
        job_list = list(top_targets())
    
    manager = ManagerLocal()
    manager.add_targets(job_list)
    manager.process()

    if manager.failed:
        return RET_CODE_JOB_FAILED
    else:
        return 0
Example #7
def parmake(job_list, n=None):
    '''Parallel equivalent of "make".

       Usage:

           parmake [n=<num>] [joblist]
    '''
    job_list = list(job_list)
    
    if not job_list:
        job_list = list(top_targets())
    
    manager = MultiprocessingManager(n)
    manager.add_targets(job_list, more=False)
    manager.process()
    
    if manager.failed:
        return RET_CODE_JOB_FAILED
    else:
        return 0
Example #8
def clustmake(job_list):
    '''Cluster equivalent of "make".

       Note: you should use the Redis backend to use multiprocessing.
    '''
    
    job_list = list(job_list)
    
    if not job_list:
        job_list = list(top_targets())    
        
    cluster_conf = compmake_config.cluster_conf  # @UndefinedVariable

    if not os.path.exists(cluster_conf):
        msg = 'Configuration file "%s" does not exist.' % cluster_conf
        raise UserError(msg)
    hosts = parse_yaml_configuration(open(cluster_conf))
    manager = ClusterManager(hosts)
    manager.add_targets(job_list)
    manager.process()
    
    if manager.failed:
        return RET_CODE_JOB_FAILED
    else:
        return 0
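parse_yaml_configuration is not shown here. A plausible sketch with PyYAML, under the assumption that the file maps host names to option dictionaries (the real compmake schema may differ):

import yaml

def parse_yaml_configuration(stream):
    """Hypothetical reader; assumes a mapping like:

        myhost1: {processors: 4}
        myhost2: {processors: 8}
    """
    data = yaml.safe_load(stream)
    if not isinstance(data, dict):
        raise ValueError('Expected a mapping of hosts, got %r' % data)
    return data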
Example #9
def graph(job_list, filename='compmake', compact=0,
          filter='dot', format='png'):
    '''Creates a graph of the given targets and dependencies 
    
        graph filename=filename compact=0,1 format=png,...
         
        Params:
            filename:  name of generated filename in the dot format
            compact=0: whether to include the job names in the nodes  
            filter=[dot,circo,twopi,...]  which algorithm to use to arrange
                       the nodes. This depends on the topology of your 
                       computation. The default is 'dot' 
                       (hierarchy top-bottom). 
            format=[png,...]  The output file format.
    '''
    if not job_list:
        job_list = top_targets()
    
    job_list = tree(job_list)
    
    try:
        import gvgen  # @UnresolvedImport
    except ImportError:
        gvgen_url = 'http://software.inl.fr/trac/wiki/GvGen' 
        raise UserError('To use the "graph" command' + 
                        ' you have to install the "gvgen" package from %s' % 
                        gvgen_url)
        
    graph = gvgen.GvGen() 

    state2color = {
        Cache.NOT_STARTED: 'grey',
        Cache.IN_PROGRESS: 'yellow',
        Cache.MORE_REQUESTED: 'blue',
        Cache.FAILED: 'red',
        Cache.DONE: 'green'
    }

    job2node = {}
    for job_id in job_list:
        if int(compact):
            job2node[job_id] = graph.newItem("")
        else:
            job2node[job_id] = graph.newItem(job_id)
        cache = get_job_cache(job_id)
        graph.styleAppend(job_id, "style", "filled")
        graph.styleAppend(job_id, "fillcolor", state2color[cache.state])
        graph.styleApply(job_id, job2node[job_id])
    
    for job_id in job_list:
        #c = get_computation(job_id)
        #children_id = [x.job_id for x in c.depends]
        for child in direct_children(job_id):
            graph.newLink(job2node[job_id], job2node[child])
    
    # TODO: add check?
    with open(filename, 'w') as f:
        graph.dot(f)    
    
    output = filename + '.' + format
    cmd_line = '%s %s -T%s -o%s' % (filter, filename, format, output)    
    retcode = os.system(cmd_line)
    if retcode != 0:
        # XXX maybe not UserError
        raise UserError("Could not run dot (cmdline='%s'). "
                        "Make sure graphviz is installed." % cmd_line)

    info("Written output on files %s, %s." % (filename, output))
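The os.system call above now checks the return code, but subprocess is arguably cleaner: it avoids shell quoting issues with filenames and reports failures directly. A sketch (render_dot is an illustrative name, not part of compmake):

import subprocess

def render_dot(filter_cmd, filename, fmt, output):
    """Render a dot file to an image without going through a shell."""
    cmd = [filter_cmd, filename, '-T' + fmt, '-o' + output]
    try:
        subprocess.check_call(cmd)
    except (OSError, subprocess.CalledProcessError) as e:
        # The surrounding code raises UserError; RuntimeError keeps
        # this sketch self-contained.
        raise RuntimeError('Could not run %r: %s. Make sure graphviz '
                           'is installed.' % (cmd, e))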
Example #10
def graph(job_list, context, filename='compmake-graph',
          filter='dot', format='png',  # @ReservedAssignment
          label='id', color=True,
          cluster=False, processing=set()):
    """

        Creates a graph of the given targets and dependencies.

        Usage:

            @: graph filename=filename label=[id,function,none] color=[0|1] format=png filter=[dot|circo|...]

        Options:

            filename:  name of generated filename in the dot format
            label='none','id','function'
            color=[0|1]: whether to color the nodes
            filter=[dot,circo,twopi,...]  which algorithm to use to arrange
                       the nodes. The best choice depends on
                       the topology of your
                       computation. The default is 'dot'
                       (hierarchy top-bottom).
            format=[png,...]  The output file format.
    """
    possible = ['none', 'id', 'function']
    if label not in possible:
        msg = 'Invalid label method %r not in %r.' % (label, possible)
        raise ValueError(msg)

    db = context.get_compmake_db()
    if not job_list:
        job_list = list(top_targets(db))

    print('jobs: %s' % job_list)
    print('processing: %s' % processing)
    print('Importing gvgen')

    try:
        import gvgen  # @UnusedImport
    except ImportError:
        gvgen_url = 'https://github.com/stricaud/gvgen'
        msg = ('To use the "graph" command you have to install the "gvgen" '
               'package from %s') % gvgen_url
        raise UserError(msg)

    print('Getting all jobs in tree')

    cq = CacheQueryDB(db)
    job_list = set(job_list)
    # all the dependencies
    job_list.update(cq.tree(job_list))

    # plus all the jobs that were defined by them
    job_list.update(definition_closure(job_list, db))

    if cluster:
        ggraph = create_graph2_clusters(cq, job_list, label=label, color=color,
                                        processing=processing)
    else:
        ggraph = create_graph1(cq, job_list, label=label, color=color,
                               processing=processing)
    print('Writing graph on %r.' % filename)
    # TODO: add check?
    with open(filename, 'w') as f:
        ggraph.dot(f)

    print('Running rendering')
    output = filename + '.' + format
    cmd_line = '%s %s -T%s -o%s' % (filter, filename, format, output)
    print('  %s' % cmd_line)
    retcode = os.system(cmd_line)
    if retcode != 0:
        msg = ("Could not run dot (cmdline='%s'). "
               "Make sure graphviz is installed." % cmd_line)
        raise UserError(msg)  # XXX maybe not UserError

    info("Written output on files %s, %s." % (filename, output))
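Above, the job set is grown to a fixed point: the requested targets, plus their dependency tree (cq.tree), plus the jobs they defined (definition_closure). The same closure idea in isolation, with a toy children() relation standing in for those two queries:

def closure(roots, children):
    """Transitive closure of roots under a children() relation."""
    seen = set()
    stack = list(roots)
    while stack:
        job_id = stack.pop()
        if job_id in seen:
            continue
        seen.add(job_id)
        stack.extend(children(job_id))
    return seen

# Toy dependency graph:
deps = {'report': ['stats'], 'stats': ['data'], 'data': []}
assert closure(['report'], deps.get) == {'report', 'stats', 'data'}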
Example #11
def cloudmake(job_list,
              context,
              cq,
              n=DefaultsToConfig('multyvac_max_jobs'),
              recurse=DefaultsToConfig('recurse'),
              new_process=DefaultsToConfig('new_process'),
              echo=DefaultsToConfig('echo'),
              skipsync=False,
              rdb=True):
    """
        Multyvac backend

    """
    # TODO: check it exists
    import multyvac  # @UnusedImport

    disable_logging_if_config(context)

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    volumes = sync_data_up(context, skipsync)

    if rdb:
        rdb_vol, rdb_db = synchronize_db_up(context, job_list)
    else:
        rdb_vol, rdb_db = None, None

    publish(context,
            'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MVacManager(
        num_processes=n,
        context=context,
        cq=cq,
        recurse=recurse,
        new_process=new_process,
        show_output=echo,
        volumes=volumes,
        rdb=rdb,
        rdb_vol=rdb_vol,
        rdb_db=rdb_db,
    )

    publish(context,
            'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    if not skipsync:
        sync_data_down(context)

    return raise_error_if_manager_failed(manager)