Example #1
    def instance_job(self, job_id):
        publish(self.context,
                'worker-status',
                job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert name not in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name

        if self.new_process:
            f = parmake_job2_new_process
            args = (job_id, self.context)

        else:
            f = parmake_job2
            args = (job_id, self.context, self.event_queue_name,
                    self.show_output)

        async_result = sub.apply_async(f, args)
        return async_result
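Note: sub.apply_async(f, args) hands the job function to a worker subprocess and returns a handle that the manager collects later. The sketch below reproduces that pattern with the standard library's multiprocessing.Pool; run_job is a hypothetical stand-in for parmake_job2, not compmake's actual worker, and the single-tuple argument mirrors the parmake_job2(args) signature shown further down.

# Minimal sketch of the apply_async pattern, assuming a standard Pool
# instead of compmake's PmakeSub workers (illustration only).
from multiprocessing import Pool

def run_job(args):
    # Hypothetical stand-in for parmake_job2: unpack the tuple and report.
    job_id, show_output = args
    return {'job_id': job_id, 'user_object': None}

if __name__ == '__main__':
    with Pool(processes=2) as pool:
        async_result = pool.apply_async(run_job, (('job-1', False),))
        # The manager keeps the AsyncResult and retrieves it when polling.
        print(async_result.get(timeout=30))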
Example #2
def parmake_pool(job_list, context, cq,
                 n=DefaultsToConfig('max_parallel_jobs'), recurse=False):
    """
        Parallel equivalent of "make", using multiprocessing.Pool. (buggy)

        Usage:

           parmake [n=<num>] [joblist]

     """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        job_list = list(top_targets(db=db))

    publish(context, 'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MultiprocessingManager(num_processes=n,
                                     cq=cq,
                                     context=context,
                                     recurse=recurse)

    publish(context, 'parmake-status',
            status='Adding %d targets.' % len(job_list))

    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()
    return raise_error_if_manager_failed(manager)
Example #3
def cloudmake(job_list, context, cq,
              n=DefaultsToConfig('multyvac_max_jobs'),
              recurse=DefaultsToConfig('recurse'),
              new_process=DefaultsToConfig('new_process'),
              echo=DefaultsToConfig('echo'),
              skipsync=False,
              rdb=True):
    """
        Multyvac backend

    """
    # TODO: check it exists
    # noinspection PyUnresolvedReferences
    import multyvac

    disable_logging_if_config(context)
    
    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))
    
    volumes = sync_data_up(context, skipsync)
    
    if rdb:
        rdb_vol, rdb_db = synchronize_db_up(context, job_list)
    else:
        rdb_vol, rdb_db = None, None
        
    publish(context, 'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MVacManager(num_processes=n,
                          context=context,
                          cq=cq,
                          recurse=recurse,
                          new_process=new_process,
                          show_output=echo,
                          volumes=volumes,
                          rdb=rdb,
                          rdb_vol=rdb_vol,
                          rdb_db=rdb_db)

    publish(context, 'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    if not skipsync:
        sync_data_down(context)

    return raise_error_if_manager_failed(manager)
Example #4
    def instance_job(self, job_id):
        publish(self.context,
                'worker-status',
                job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert name not in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name
        self.subname2job[name] = job_id

        job = get_job(job_id, self.db)

        if self.rdb:
            f = mvac_job_rdb
            args = (job_id, self.context, self.event_queue_name,
                    self.show_output, self.volumes, self.rdb_vol.name,
                    self.rdb_db, os.getcwd())
        else:
            if job.needs_context:
                # if self.new_process:
                #     f = parmake_job2_new_process
                #     args = (job_id, self.context)
                #
                # else:
                f = parmake_job2
                args = (job_id, self.context, self.event_queue_name,
                        self.show_output)
            else:
                f = mvac_job
                args = (job_id, self.context, self.event_queue_name,
                        self.show_output, self.volumes, os.getcwd())

        if True:
            async_result = sub.apply_async(f, args)
        else:
            warnings.warn('Debugging synchronously')
            async_result = f(args)

        return async_result
Example #5
def parmake(job_list,
            context,
            cq,
            n=DefaultsToConfig('max_parallel_jobs'),
            recurse=DefaultsToConfig('recurse'),
            new_process=DefaultsToConfig('new_process'),
            echo=DefaultsToConfig('echo')):
    """
        Parallel equivalent of make.

        Uses multiprocessing.Process as a backend and a Python queue to
        communicate with the workers.

        Options:

          parmake n=10             Uses 10 workers
          parmake recurse=1        Recursive make: put generated jobs in
                                   the queue.
          parmake new_process=1    Run the jobs in a new Python process.
          parmake echo=1           Shows the output of the jobs. This might
                                   slow down everything.

          parmake new_process=1 echo=1   Not supported yet.

    """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    publish(context,
            'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = PmakeManager(num_processes=n,
                           context=context,
                           cq=cq,
                           recurse=recurse,
                           new_process=new_process,
                           show_output=echo)

    publish(context,
            'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    return raise_error_if_manager_failed(manager)
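The n, recurse, new_process, and echo defaults above are DefaultsToConfig(...) markers rather than concrete values, so the command can fall back to the active configuration when no explicit value is given. Below is a minimal sketch of how such a sentinel could work, assuming it only records the config key to be resolved at call time; compmake's actual class may differ.

# Hypothetical sketch of a DefaultsToConfig-style sentinel (assumption: it
# stores only the config key and is resolved when the command runs).
class DefaultsToConfig:
    def __init__(self, name):
        self.name = name

def resolve(value, config):
    # Replace the sentinel with the configured value; pass real values through.
    if isinstance(value, DefaultsToConfig):
        return config[value.name]
    return value

config = {'max_parallel_jobs': 8, 'recurse': False}
print(resolve(DefaultsToConfig('max_parallel_jobs'), config))  # prints 8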
Example #6
    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert name not in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name
        self.subname2job[name] = job_id

        job = get_job(job_id, self.db)

        if self.rdb:
            f = mvac_job_rdb
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output,
                    self.volumes, self.rdb_vol.name, self.rdb_db, os.getcwd())            
        else:
            if job.needs_context:
                # if self.new_process:
                #     f = parmake_job2_new_process
                #     args = (job_id, self.context)
                # 
                # else:
                f = parmake_job2
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output)
            else:
                f = mvac_job
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output,
                        self.volumes, os.getcwd())
    
        if True:
            async_result = sub.apply_async(f, args)
        else:
            warnings.warn('Debugging synchronously')
            async_result = f(args)
            
        return async_result
Example #7
def parmake(job_list, context, cq,
            n=DefaultsToConfig('max_parallel_jobs'),
            recurse=DefaultsToConfig('recurse'),
            new_process=DefaultsToConfig('new_process'),
            echo=DefaultsToConfig('echo')):
    """
        Parallel equivalent of make.

        Uses multiprocessing.Process as a backend and a Python queue to
        communicate with the workers.

        Options:

          parmake n=10             Uses 10 workers
          parmake recurse=1        Recursive make: put generated jobs in
                                   the queue.
          parmake new_process=1    Run the jobs in a new Python process.
          parmake echo=1           Shows the output of the jobs. This might
                                   slow down everything.

          parmake new_process=1 echo=1   Not supported yet.

    """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    publish(context, 'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = PmakeManager(num_processes=n,
                           context=context,
                           cq=cq,
                           recurse=recurse,
                           new_process=new_process,
                           show_output=echo)

    publish(context, 'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    return raise_error_if_manager_failed(manager)
Example #8
    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert name not in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name

        if self.new_process:
            f = parmake_job2_new_process
            args = (job_id, self.context)

        else:
            f = parmake_job2
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output)

        async_result = sub.apply_async(f, args)
        return async_result
Example #9
def parmake_pool(job_list,
                 context,
                 cq,
                 n=DefaultsToConfig('max_parallel_jobs'),
                 recurse=False):
    """
        Parallel equivalent of "make", using multiprocessing.Pool. (buggy)

        Usage:

           parmake [n=<num>] [joblist]

     """

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        job_list = list(top_targets(db=db))

    publish(context,
            'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MultiprocessingManager(num_processes=n,
                                     cq=cq,
                                     context=context,
                                     recurse=recurse)

    publish(context,
            'parmake-status',
            status='Adding %d targets.' % len(job_list))

    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()
    return raise_error_if_manager_failed(manager)
Example #10
def progress_callback(stack):
    publish(context, 'job-progress-plus', job_id=job_id, host=host,
            stack=stack)
Example #11
def progress_callback(stack):
    publish(context,
            'job-progress-plus',
            job_id=job_id,
            host=host,
            stack=stack)
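Every example on this page reports status through publish(context, event_name, **kwargs). The sketch below shows one plausible shape for the publish/register_handler pair, assuming a plain in-process registry keyed by event name; the real compmake event system also supports the "*" wildcard and the interprocess queue used in the next examples.

# Hypothetical in-process event registry behind publish()/register_handler()
# (assumption: a dict mapping event name to a list of callbacks).
_handlers = {}

def register_handler(name, handler):
    _handlers.setdefault(name, []).append(handler)

def publish(context, name, **kwargs):
    event = dict(name=name, **kwargs)
    for handler in _handlers.get(name, []) + _handlers.get('*', []):
        handler(event)

register_handler('job-progress-plus', lambda event: print(event['job_id']))
publish(None, 'job-progress-plus', job_id='job-1', host='local', stack=[])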
Example #12
def parmake_job2(args):
    """
    args = tuple job_id, context, queue_name, show_events
        
    Returns a dictionary with fields "user_object", "new_jobs", 'delete_jobs'.
    "user_object" is set to None because we do not want to 
    load in our thread if not necessary. Sometimes it is necessary
    because it might contain a Promise. 
   
    """
    job_id, context, event_queue_name, show_output = args  # @UnusedVariable
    check_isinstance(job_id, str)
    check_isinstance(event_queue_name, str)
    from .pmake_manager import PmakeManager

    event_queue = PmakeManager.queues[event_queue_name]

    db = context.get_compmake_db()

    setproctitle('compmake:%s' % job_id)

    class G():
        nlostmessages = 0

    try:
        # We register a handler for the events to be passed back
        # to the main process
        def handler(event):
            try:
                if not CompmakeConstants.disable_interproc_queue:
                    event_queue.put(event, block=False)
            except Full:
                G.nlostmessages += 1
                # Do not write messages here, it might create a recursive
                # problem.
                # sys.stderr.write('job %s: Queue is full, message is lost.\n'
                # % job_id)

        remove_all_handlers()

        if show_output:
            register_handler("*", handler)

        def proctitle(event):
            stat = '[%s/%s %s] (compmake)' % (event.progress, event.goal,
                                              event.job_id)
            setproctitle(stat)

        register_handler("job-progress", proctitle)

        publish(context, 'worker-status', job_id=job_id, status='started')

        # Note that this function is called after the fork.
        # All data is conserved, but resources need to be reopened
        try:
            db.reopen_after_fork()  # @UndefinedVariable
        except:
            pass

        publish(context, 'worker-status', job_id=job_id, status='connected')

        res = make(job_id, context=context)

        publish(context, 'worker-status', job_id=job_id, status='ended')

        res['user_object'] = None
        result_dict_check(res)
        return res

    except KeyboardInterrupt:
        assert False, 'KeyboardInterrupt should be captured by make() (' \
                      'inside Job.compute())'
    except JobInterrupted:
        publish(context, 'worker-status', job_id=job_id, status='interrupted')
        raise
    except JobFailed:
        raise
    except BaseException:
        # XXX
        raise
    except:
        raise
    finally:
        publish(context, 'worker-status', job_id=job_id, status='cleanup')
        setproctitle('compmake-worker-finished %s' % job_id)
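On the manager side, the events that handler puts into PmakeManager.queues[event_queue_name] still have to be drained and re-published in the parent process. The loop below is a minimal sketch of that idea using a standard multiprocessing.Queue and the queue.Empty exception; drain_events and broadcast are hypothetical names, not compmake's API.

# Hypothetical drain loop for the worker event queue (assumption: the manager
# periodically re-publishes whatever the workers have queued up).
from multiprocessing import Queue
from queue import Empty

def drain_events(event_queue, broadcast, max_events=100):
    # Pull pending events, waiting briefly, without stalling the manager loop.
    for _ in range(max_events):
        try:
            event = event_queue.get(timeout=0.1)
        except Empty:
            return
        broadcast(event)

q = Queue()
q.put({'name': 'worker-status', 'status': 'started'})
drain_events(q, broadcast=print)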
Example #13
def parmake_job2(args):
    """
    args = tuple job_id, context, queue_name, show_events
        
    Returns a dictionary with fields "user_object", "new_jobs", 'delete_jobs'.
    "user_object" is set to None because we do not want to 
    load in our thread if not necessary. Sometimes it is necessary
    because it might contain a Promise. 
   
    """
    job_id, context, event_queue_name, show_output = args  # @UnusedVariable
    check_isinstance(job_id, str)
    check_isinstance(event_queue_name, str)
    from .pmake_manager import PmakeManager

    event_queue = PmakeManager.queues[event_queue_name]

    db = context.get_compmake_db()

    setproctitle('compmake:%s' % job_id)

    class G():
        nlostmessages = 0

    try:
        # We register a handler for the events to be passed back 
        # to the main process
        def handler(event):
            try:
                if not CompmakeConstants.disable_interproc_queue:
                    event_queue.put(event, block=False)
            except Full:
                G.nlostmessages += 1
                # Do not write messages here, it might create a recursive
                # problem.
                # sys.stderr.write('job %s: Queue is full, message is lost.\n'
                # % job_id)

        remove_all_handlers()

        if show_output:
            register_handler("*", handler)

        def proctitle(event):
            stat = '[%s/%s %s] (compmake)' % (event.progress,
                                              event.goal, event.job_id)
            setproctitle(stat)

        register_handler("job-progress", proctitle)

        publish(context, 'worker-status', job_id=job_id, status='started')

        # Note that this function is called after the fork.
        # All data is conserved, but resources need to be reopened
        try:
            db.reopen_after_fork()  # @UndefinedVariable
        except:
            pass

        publish(context, 'worker-status', job_id=job_id, status='connected')

        res = make(job_id, context=context)

        publish(context, 'worker-status', job_id=job_id, status='ended')

        res['user_object'] = None
        result_dict_check(res)
        return res
        
    except KeyboardInterrupt:
        assert False, 'KeyboardInterrupt should be captured by make() (' \
                      'inside Job.compute())'
    except JobInterrupted:
        publish(context, 'worker-status', job_id=job_id, status='interrupted')
        raise
    except JobFailed:
        raise
    except BaseException:
        # XXX
        raise
    except:
        raise
    finally:
        publish(context, 'worker-status', job_id=job_id, status='cleanup')
        setproctitle('compmake-worker-finished')
        
Example #14
def cloudmake(job_list,
              context,
              cq,
              n=DefaultsToConfig('multyvac_max_jobs'),
              recurse=DefaultsToConfig('recurse'),
              new_process=DefaultsToConfig('new_process'),
              echo=DefaultsToConfig('echo'),
              skipsync=False,
              rdb=True):
    """
        Multyvac backend

    """
    # TODO: check it exists
    import multyvac  # @UnusedImport

    disable_logging_if_config(context)

    publish(context, 'parmake-status', status='Obtaining job list')
    job_list = list(job_list)

    db = context.get_compmake_db()
    if not job_list:
        # XXX
        job_list = list(top_targets(db=db))

    volumes = sync_data_up(context, skipsync)

    if rdb:
        rdb_vol, rdb_db = synchronize_db_up(context, job_list)
    else:
        rdb_vol, rdb_db = None, None

    publish(context,
            'parmake-status',
            status='Starting multiprocessing manager (forking)')
    manager = MVacManager(
        num_processes=n,
        context=context,
        cq=cq,
        recurse=recurse,
        new_process=new_process,
        show_output=echo,
        volumes=volumes,
        rdb=rdb,
        rdb_vol=rdb_vol,
        rdb_db=rdb_db,
    )

    publish(context,
            'parmake-status',
            status='Adding %d targets.' % len(job_list))
    manager.add_targets(job_list)

    publish(context, 'parmake-status', status='Processing')
    manager.process()

    if not skipsync:
        sync_data_down(context)

    return raise_error_if_manager_failed(manager)