Code Example #1
File: train.py  Project: liucong3/AC
def main():
	import log
	logger = log.Logger(args(), init_train_info={'epoch':0, 'loss':[]})
	logger.info('\n'.join([arg_name + ': ' + str(arg_val) for arg_name, arg_val in args().__dict__.items()]))
	model = logger.create_model(args().train_gpu)
	if logger.get_model_path() is None:
		logger.save_model(model)

	import threading, multiprocessing
	ctx = multiprocessing.get_context('spawn')
	queue = ctx.Queue()
	start_exploration_processes(ctx, queue)

	try:
		replay_buffer = []
		# Block until at least one full batch of transitions has arrived.
		while True:
			while not queue.empty():
				replay_buffer.extend(queue.get())
			if len(replay_buffer) >= args().batch_size:
				break
		model_lock = threading.Lock()
		start_evaluation_threads(model, model_lock, logger)
		train(replay_buffer, queue, model, model_lock, logger)
	finally:
		queue.close()
Code Example #2
File: test_manager.py  Project: willnx/autobox
 def _drain_queue(queue):
     """If you don't empty the queues before termination, then you get a BrokePipe traceback"""
     while not queue.empty():
         try:
             queue.get()
         except Exception:
             pass
     queue.close()
     queue.join_thread()
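
The docstring above names the real hazard: a multiprocessing.Queue hands items to the underlying pipe from a background feeder thread, so tearing a process down while items are still buffered can end in a BrokenPipeError. A minimal, self-contained sketch of the same drain-then-close sequence (the producer and the timings here are illustrative, not from the project):

import time
import multiprocessing

def _producer(q):
    i = 0
    while True:          # keep producing until the parent terminates us
        q.put(i)
        i += 1

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_producer, args=(q,))
    p.start()
    time.sleep(0.1)
    p.terminate()
    p.join()
    # Drain leftovers so the feeder machinery can shut down cleanly.
    while not q.empty():
        try:
            q.get_nowait()
        except Exception:
            pass
    q.close()
    q.join_thread()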
Code Example #3
File: scraper.py  Project: blues-lab/polipy
def _download_policies(policies, output_dir, language, verbose, check_previous, processes=multiprocessing.cpu_count()):
    unique_policies = set(policies)
    logger.info('Attempting to download %d policies' % len(unique_policies))
    logger.info('output_dir=%s, language=%s' % (output_dir, language))

    # Create the tagged output directory <output_dir>/<date>/<region_tag>/, if necessary
    utc_date = datetime.datetime.utcnow().strftime('%Y%m%d')
    tagged_output_dir = os.path.join(os.path.abspath(output_dir), utc_date) #, region_tag)
    assert not os.path.isfile(tagged_output_dir), 'Tagged output directory %s already exists as a file' % tagged_output_dir
    if not os.path.isdir(tagged_output_dir):
        logger.info('Creating tagged output directory %s' % tagged_output_dir)
        os.makedirs(tagged_output_dir)
    assert os.path.isdir(tagged_output_dir), 'Tagged output directory %s does not exist' % tagged_output_dir

    # Set up the process to kill hung geckodrivers
    logger.info('Setting up geckodriver killer')
    queue = multiprocessing.Queue()
    killer_process = multiprocessing.Process(target=_kill_zombies_parallel_wrapper, args=(queue,))
    killer_process.start()

    # Calculate date to check by if update argument is true
    check_date = None
    if check_previous:
        directory_dates = []
        for d in os.listdir(os.path.abspath(output_dir)):
            if d != utc_date:
                try:
                    directory_dates.append(datetime.datetime.strptime(d, '%Y%m%d'))
                except ValueError:
                    pass
        check_date = max(directory_dates).strftime('%Y%m%d') if len(directory_dates) > 0 else None

    # Download the policies
    logger.info('Parallelizing downloads over %d processes' % processes)
    parallel_args = [ParallelArg(x,
                                 os.path.join(tagged_output_dir, x.domain, x.url_md5),
                                 utc_date=utc_date,
                                 browser_language=language,
                                 check_date=check_date,
                                 verbose=verbose)
                     for x in unique_policies]

    with multiprocessing.Pool(processes=processes) as pool:
        r = list(tqdm(pool.imap_unordered(_download_policy_parallel_wrapper, parallel_args), total=len(parallel_args)))

    # Kill the geckodriver killer process
    logger.info('Stopping the geckodriver killer process')
    queue.put(True)
    queue.close()
    queue.join_thread()
    killer_process.join()
Code Example #4
File: processing.py  Project: janek/bb_behavior
            def _wrapped(thread_context=None):
                takes_thread_context = "thread_context" in inspect.signature(
                    target).parameters
                if takes_thread_context and (thread_context is None) and (
                        thread_context_factory is not None):
                    with thread_context_factory as ctx:
                        _wrapped(thread_context=ctx)
                        return

                is_generator = inspect.isgeneratorfunction(target)
                try:
                    if inqueue is not None:
                        call_scheme = None
                        while True:
                            job = inqueue.get()
                            if job is None:
                                # Queue finished. Put marker back.
                                inqueue.put(None)
                                break
                            if not call_scheme:
                                if not isinstance(job, tuple):
                                    call_scheme = lambda x: target(
                                        x, thread_context=thread_context)
                                else:
                                    call_scheme = lambda x: target(
                                        *x, thread_context=thread_context)
                            if not is_generator:
                                results = call_scheme(job)
                                if results is not None:
                                    outqueue.put(results)
                            else:
                                for results in call_scheme(job):
                                    if results is not None:
                                        outqueue.put(results)
                    else:
                        for results in target(*args, **kwargs):
                            if results is not None:
                                outqueue.put(results)

                    thread_index = finished_barrier.wait()
                    if thread_index == 0:
                        outqueue.put(None)
                except Exception as e:
                    print("Error at job: {}".format(str(target)))
                    print(str(e))
                    if not self.use_threads:
                        for queue in self.queues:
                            queue.close()
                    raise
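
The inqueue.put(None) above is the detail worth copying: when several workers share one input queue, a single end-of-stream marker must be re-broadcast so every sibling eventually sees it. A stripped-down sketch of that pattern with plain threads (all names here are illustrative):

import queue
import threading

def worker(inq, outq):
    while True:
        job = inq.get()
        if job is None:
            inq.put(None)    # put the marker back for the other workers
            break
        outq.put(job * 2)

inq, outq = queue.Queue(), queue.Queue()
threads = [threading.Thread(target=worker, args=(inq, outq)) for _ in range(4)]
for t in threads:
    t.start()
for job in range(10):
    inq.put(job)
inq.put(None)                # one marker is enough; the workers pass it along
for t in threads:
    t.join()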
Code Example #5
File: trainer.py  Project: XeryusTC/machine-learning
def train(config, num_workers):
    excepts = []
    queue = mp.JoinableQueue()
    for gamma in config.gammas:
        for width in config.widths:
            for eta in config.etas:
                queue.put((width, gamma, eta, config.runs))
    # stop signals: one sentinel per worker so each train_worker can exit
    # (assumed protocol; train_worker is not shown in this excerpt)
    for _ in range(num_workers):
        queue.put(None)
    workers = [mp.Process(target=train_worker, args=(queue,))
               for i in range(num_workers)]
    for w in workers:
        w.start()

    # Wait for work to complete
    queue.join()
    queue.close()
    for w in workers:
        w.join()
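
train_worker itself is not part of this excerpt. For queue.join() to return, each worker must call task_done() once per item it takes off the JoinableQueue, and with the sentinels added above it also needs an exit condition. A minimal sketch of a compatible worker (its real body is an assumption):

def train_worker(queue):
    while True:
        item = queue.get()
        if item is None:             # sentinel: no more work
            queue.task_done()
            break
        width, gamma, eta, runs = item
        # ... run the actual training for this parameter combination ...
        queue.task_done()            # without this, queue.join() never returns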
Code Example #6
def main():
    parser = argparse.ArgumentParser(description='Read a config file.')
    parser.add_argument('urqmd_file', metavar='URQMD_FILE', type=argparse.FileType('r', encoding='ascii'), help="Must be of type .f14")
    parser.add_argument('out_file', metavar='OUT_FILE', help='The HDF5 (.h5) file to store the information in')
    parser.add_argument('--no-event-columns', action='store_true', help="Do NOT include columns for the event number and event impact parameter.")
    parser.add_argument('--chunksize', type=int, default=100000, help='The number of lines to read in one go.')
    parser.add_argument('--verbosity', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='INFO', help="How verbose should the output be")
    args = parser.parse_args()

    logging.basicConfig(level=args.verbosity, format='%(asctime)s.%(msecs)d %(levelname)s %(message)s', datefmt="%Y-%m-%d %H:%M:%S")

    queue = multiprocessing.Queue()
    worker = HDF_Worker(args.out_file, queue)
    worker.start()
    for df in F14_Reader(args.urqmd_file, not args.no_event_columns).iter_dataframes(chunksize=args.chunksize):
        logging.debug("DataFrame ready to be written to file.")
        while not queue.empty():
            time.sleep(0.05)  # simple backpressure: let the writer catch up
        logging.debug("Queue empty. DataFrame will be put into write queue now.")
        queue.put(df.copy())
    queue.put('EOF')
    queue.close()
    queue.join_thread()
    worker.join()
Code Example #7
File: symmetricpool.py  Project: vsekhar/vmesh
	def end(self):
		self.do_all(None) # stop sentinel
		for _, queue in self._processes:
			queue.close()
		for process, _ in self._processes:
			process.join()
Code Example #8

import multiprocessing


class MyFancyClass:
    """Minimal stand-in so the snippet runs; the original class is not shown."""
    def __init__(self, name):
        self.name = name

    def do_sth(self):
        print('doing something with %s' % self.name)


def worker(q):
    obj = q.get()
    obj.do_sth()


# NB: on platforms that spawn workers (Windows, macOS), this script-level
# code belongs under an `if __name__ == '__main__':` guard.
queue = multiprocessing.Queue()
p1 = multiprocessing.Process(target=worker, args=(queue, ))
p2 = multiprocessing.Process(target=worker, args=(queue, ))
p1.start()
p2.start()
queue.put(MyFancyClass("sdjaklsdj"))
queue.put(MyFancyClass("gahahah"))
# flush the queue's feeder thread, then wait for the workers to finish
queue.close()
queue.join_thread()
p1.join()
p2.join()
print()


# manage several workers consuming data from a JoinableQueue and passing results back to the parent process
class Consumer(multiprocessing.Process):
    def __init__(self, task_queue, result_queue):
        super().__init__()
        self.task_queue = task_queue
        self.result_queue = result_queue

    def run(self):
        proc_name = self.name
        # The snippet is truncated here; a typical completion of the pattern:
        while True:
            task = self.task_queue.get()
            if task is None:
                # A None task is the poison pill: time to shut down.
                print('%s: Exiting' % proc_name)
                self.task_queue.task_done()
                break
            result = task()
            self.task_queue.task_done()
            self.result_queue.put(result)
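
A short usage sketch for the Consumer above, following the usual poison-pill protocol (the Square task class and the counts are illustrative; tasks must be picklable, so no lambdas):

class Square:
    def __init__(self, n):
        self.n = n

    def __call__(self):
        return self.n * self.n


if __name__ == '__main__':
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()

    consumers = [Consumer(tasks, results) for _ in range(4)]
    for c in consumers:
        c.start()

    for i in range(10):
        tasks.put(Square(i))
    for _ in consumers:
        tasks.put(None)       # one poison pill per consumer

    tasks.join()              # returns once every task_done() has been called
    for _ in range(10):
        print(results.get())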
Code Example #9
    def run(self):
        NTRC.ntracef(5, "END", "proc run ltJobs|%s|" % (self.gl.ltJobs))
        nCasesDone = 0
        self.gl.nWaitedForDone = 0
        while True:
            # L O C K 
            with self.gl.lockJobList:
                with self.gl.lockPrint:
                    NTRC.ntracef(3, "END", "proc ltJobs|%s|" % (self.gl.ltJobs))
                    ltActiveJobs = [(idx,tJob) for idx,tJob in 
                                    enumerate(self.gl.ltJobs) if tJob]
                    NTRC.ntracef(3, "END", "proc ltActiveJobs|%s|" 
                                % (ltActiveJobs))
                for idxtJob in ltActiveJobs:
                    idx,tJob = idxtJob
                    nJob = tJob.procid
                    proc = self.gl.dId2Proc[nJob]
                    if not proc.is_alive():
                        with self.gl.lockPrint:
                            NTRC.ntracef(3, "END", "proc endall found done "
                                        "ltJobs[%s]=procid|%s|=|%s| alive?|%s|" 
                                        % (idx, nJob, proc, proc.is_alive()))
                            # Job listed as still baking but reports that it is done.
                        # Wait until it is fully baked.
                        proc.join()
                        with self.gl.lockPrint:
                            NTRC.ntracef(0, "END", "proc case|%s| end   " 
                                        % (nJob))
                        # Get its output for the full debug list.
                        queue = self.gl.dId2Queue[nJob]
                        lQOutput = []
                        while not queue.empty():
                            lLinesOut = queue.get().listoflists
                            lQOutput.append(lLinesOut)
                        queue.close()
                        if self.gl.bDebugPrint:
                            with self.gl.lockPrint:
                                NTRC.ntracef(5, "END", "proc lQOutput from q|%s|" 
                                                % (lQOutput))
                                self.llsFullOutput.extend(lQOutput)
                                NTRC.ntracef(5, "END", "proc lOutput from q|%s|" 
                                            % (self.llsFullOutput))
                            # Remove job from active list and Id-dicts.
                        # If the queue objects are still in the dId2Queue dict,
                        #  the pipe remains open, oops.  
                        self.gl.ltJobs[idx] = None
                        self.gl.dId2Proc.pop(nJob)
                        self.gl.dId2Queue.pop(nJob)
                        nCasesDone += 1
                        self.gl.nCasesDone += 1
                        with self.gl.lockPrint:
                            NTRC.ntracef(3, "END", "proc job completed ndone|%s|" 
                                        % (self.gl.nCasesDone))
                    else:
                        with self.gl.lockPrint:
                            NTRC.ntracef(3, "END", "proc job alive "
                                        "ltJobs[%s]=procid|%s|=|%s|"
                                        % (idx, nJob, proc))

                with self.gl.lockPrint:
                    NTRC.ntracef(3, "END", "proc end for-activejobs1"
                                " thatsall?|%s| ndone|%s| nstarted|%s|" 
                                % (self.gl.bThatsAllFolks
                                , self.gl.nCasesDone, self.gl.nCasesStarted))

            # Now unlock and check for end of loop.
            if (self.gl.bThatsAllFolks 
                and self.gl.nCasesDone == self.gl.nCasesTotal):
                with self.gl.lockPrint:
                    NTRC.ntracef(0, "END", "proc end of all jobs, "
                                "ndone|%s| nwaits instr|%s| slot|%s| done|%s|" 
                                % (nCasesDone
                                , self.gl.nWaitedForInstr
                                , self.gl.nWaitedForSlot
                                , self.gl.nWaitedForDone
                                ))
                break
            else:
                self.gl.nWaitedForDone += 1
                with self.gl.lockPrint:
                    NTRC.ntracef(3, "END", "proc end for-activejobs2 wait, "
                                "ndone|%s| nwaits|%s|" 
                                % (nCasesDone, self.gl.nWaitedForDone))
                time.sleep(self.nWaitMsec / 1000.0)
                continue
            # E N D L O C K 

        # llsFullOutput is a list of list of strings, where
        #  the inner list is lines output from commands for
        #  one job, more or less, with prefix and suffix 
        #  and comments, too.
        # Paste the whole thing together into a yuge list of lines.
        if self.gl.bDebugPrint:
            sFullOutput = ""
            for lJobOut in self.llsFullOutput:
                sJobOut = "\n".join(lJobOut)
                sFullOutput += sJobOut
            NTRC.ntracef(5, "END", "proc sFullOutput|%s|" % (sFullOutput))