def export_table(host, port, auth_key, db, table, directory, fields, format, error_queue, progress_info, stream_semaphore, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        rdb_call_wrapper(conn_fn, "count", get_table_size, db, table, progress_info)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)

        with stream_semaphore:
            task_queue = SimpleQueue()
            writer = launch_writer(format, directory, db, table, fields, task_queue, error_queue)
            writer.start()

            rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                             table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.RqlError, r.RqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(("exit", "event")) # Exit is triggered by sending a message with two objects
            writer.join()
        else:
            error_queue.put((RuntimeError, RuntimeError("writer unexpectedly stopped"),
                             traceback.extract_tb(sys.exc_info()[2])))
Example #2
 def _setup(self):
     if isinstance(self._instruction, I.ATM):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._atomic_process = Process(target=self.run_atomic)
         self._is_atomic_enabled = True
     elif isinstance(self._instruction, I.SEQ):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._sequential_ingress_process = Process(target=self.run_sequential_ingress)
         self._sequential_egress_process = Process(target=self.run_sequential_egress)
         self._metadata_queue = Queue()
         self._is_sequential_enabled = True
     elif isinstance(self._instruction, I.CNC):
         # Note: CNC can't have PUSH/POP instructions in its code blocks. They violate the concurrency invariant.
         self._codes = self._instruction.codes
         self._modified_locations = []
         self._modified_reserved_fields = []
         self._modified_fields = []
         for code in self._codes:
             self._instruction_pipelines.append(Pipeline(code.instructions, self._tables))
             self._modified_locations.append(get_modified_locations(code.instructions))
             self._modified_reserved_fields.append(get_modified_reserved_fields(code.instructions))
             self._modified_fields.append(get_modified_fields(code.instructions, code.argument_fields))
         self._concurrent_ingress_process = Process(target=self.run_concurrent_ingress)
         self._concurrent_egress_process = Process(target=self.run_concurrent_egress)
         self._metadata_queue = Queue()
         self._is_concurrent_enabled = True
     else:
         raise RuntimeError()
Example #3
 def _wrapped_function(self, process_idx: int, function: Callable,
                       args: Any, kwargs: Any,
                       return_queue: SimpleQueue) -> None:
     self._worker_setup(process_idx)
     result = function(*args, **kwargs)
     if self.local_rank == 0:
         return_queue.put(move_data_to_device(result, "cpu"))
Example #4
def _init_queues(n):
    global verbose_output
    verbose_output = SimpleQueue()
    global signals
    signals = SimpleQueue()
    global queues
    queues = [SimpleQueue() for _ in range(n)]
Example #5
    def _wrapping_function(
        self,
        process_idx: int,
        trainer: Optional["pl.Trainer"],
        function: Callable,
        args: Any,
        kwargs: Any,
        return_queue: SimpleQueue,
    ) -> None:
        self._strategy._worker_setup(process_idx)
        results = function(*args, **kwargs)

        if trainer is not None:
            results = self._collect_rank_zero_results(trainer, results)

        if self._strategy.local_rank == 0:
            return_queue.put(move_data_to_device(results, "cpu"))

        # https://github.com/pytorch/xla/issues/1801#issuecomment-602799542
        self._strategy.barrier("end-process")

        # Ensure that the rank 0 process is the one exiting last
        # https://github.com/pytorch/xla/issues/2190#issuecomment-641665358
        if self._strategy.local_rank == 0:
            time.sleep(2)
Example #6
 def _handle_workers(cls, ctx, processes: int, Proc,
                     task_queue: SimpleQueue, in_queue: SimpleQueue,
                     out_queue: SimpleQueue, init_args, worker: Worker,
                     wrap_exception, change_notifier: SimpleQueue,
                     pool: List[Process]):
     """
     Manage all processes in the pool; runs in a thread.
     :param ctx: process context
     :param processes: number of processes requested
     :param Proc: used to create processes, obtained via get_context()
     :param in_queue: sends tasks to the processes
     :param out_queue: receives results from finished processes
     :param init_args: initialization data
     :param worker: the Worker to use
     :param wrap_exception: whether to wrap exceptions raised while running tasks
     :return:
     """
     cur_th = threading.current_thread()
     while cur_th._state == State.RUN:
         cls._maintain_pool(ctx, processes, Proc, in_queue, out_queue,
                            init_args, worker, wrap_exception, pool)
         cls._wait_for_updates(change_notifier)
     # exit thread
     logging.debug("send exit signal to task queue")
     task_queue.put(EndSignal.END)
Example #7
 def __call__(self, in_queue: SimpleQueue, out_queue: SimpleQueue,
              init_args, wrap_exception, *args, **kwargs):
     if init_args:
         self.initializer(init_args)
     while True:
         try:
             logging.debug("waiting recv task")
             task = in_queue.get()
             logging.debug("task received")
         except (EOFError, OSError):
             logging.debug('worker got EOFError or OSError -- exiting')
             break
         if task is None:
             logging.debug('worker got sentinel -- exiting')
             break
         p_args = task.args()
         if isinstance(p_args, Tuple):
             args_l, plugins = p_args
             exit_code = self._main(args_l, plugins)
         else:
             exit_code = self._main(p_args)
         try:
             out_queue.put(exit_code)
         except Exception as e:
             out_queue.put(e)
Example #8
    def __init__(self, key, task_group, randomize):
        self.key = key
        self.gen_worker = task_group['gen_worker']
        self.task_ids = task_group['task_ids']
        self.is_parallel = task_group['is_parallel']
        if self.is_parallel:
            self.randomize = randomize
            if self.randomize:
                random.shuffle(self.task_ids)
        else:
            self.randomize = False
        self.result_queue = SimpleQueue()
        self.task_queue = SimpleQueue()

        # Don't expose queues file descriptors over Popen to, say, tarantool
        # running tests.
        set_fd_cloexec(self.result_queue._reader.fileno())
        set_fd_cloexec(self.result_queue._writer.fileno())
        set_fd_cloexec(self.task_queue._reader.fileno())
        set_fd_cloexec(self.task_queue._writer.fileno())

        for task_id in self.task_ids:
            self.task_queue.put(task_id)
        self.worker_ids = set()
        self.done = False
        self.done_task_ids = set()
Example #9
def multi_process_train(
    args,
    error_queue: mp_queues.SimpleQueue,
    output_queue: Optional[mp_queues.Queue],
    init_fn: Optional[Callable[[], None]] = None,
):
    try:
        if init_fn:
            init_fn()
        torch.cuda.set_device(args.device_id)
        if args.distributed_world_size > 1:
            args.distributed_rank = distributed_utils.distributed_init(args)
        extra_state, trainer, task, epoch_itr = setup_training(args)
        train(
            args=args,
            extra_state=extra_state,
            trainer=trainer,
            task=task,
            epoch_itr=epoch_itr,
            output_queue=output_queue,
        )
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback

        error_queue.put((args.distributed_rank, traceback.format_exc()))
Example #10
File: sim900.py Project: Flasew/simsystem
    def start(self):
        '''Start streaming
        '''
        # signal handling.
        self.signaled = False
        # stores the original signals
        original_sigint = signal.getsignal(signal.SIGINT)
        original_sighup = signal.getsignal(signal.SIGHUP)
        original_sigterm = signal.getsignal(signal.SIGTERM)

        # set the new signal handlers
        signal.signal(signal.SIGINT, lambda s, f: self.set_signal())
        signal.signal(signal.SIGHUP, lambda s, f: self.set_signal())
        signal.signal(signal.SIGTERM, lambda s, f: self.set_signal())

        enable_port_sum = sum([2**i for i in self.ns_commands.keys()
                               ]) + 2**self.s_command[0]
        self.sendcmd("RPER", num=enable_port_sum)
        self.sendcmd("RDDR", num=0)

        self.s_buf = SimpleQueue()
        self.ns_buf = {0: SimpleQueue()}
        for key in self.ns_commands:
            self.ns_buf[key] = SimpleQueue()

        s_fname = self.s_fname if self.s_fname else "{:s}stream.csv".format(
            datetime.now().strftime("%Y%m%d%H%M%S"))
        ns_fname = self.ns_fname if self.ns_fname else "{:s}nostream.csv".format(
            datetime.now().strftime("%Y%m%d%H%M%S"))

        s_fwrite_proc = Process(target = \
            lambda: file_writer(s_fname, self.s_fheader, self.s_fstr, self.s_buf))
        ns_fwrite_proc = Process(target = \
            lambda: file_multi_writer(ns_fname, self.ns_fheader, self.ns_fstr, self.ns_buf))
        sort_proc = Process(target=self.sorter)
        ns_proc = Process(target=self.ns_cmd_sender)

        self.sendcmd("SNDT",
                     self.s_command[0],
                     str_block=makecmd("TPER", num=self.s_tper))
        self.sendcmd("SNDT", self.s_command[0], str_block=self.s_command[1])

        s_fwrite_proc.start()
        ns_fwrite_proc.start()
        sort_proc.start()
        ns_proc.start()

        sort_proc.join()
        ns_proc.join()
        s_fwrite_proc.join()
        ns_fwrite_proc.join()

        del self.s_buf
        del self.ns_buf

        # restore the original handlers
        signal.signal(signal.SIGINT, original_sigint)
        signal.signal(signal.SIGHUP, original_sighup)
        signal.signal(signal.SIGTERM, original_sigterm)
Example #11
 def _setup_queues(self):
     from multiprocessing.queues import SimpleQueue
     self._inqueue = SimpleQueue()
     self._outqueue = SimpleQueue()
     self._ackqueue = SimpleQueue()
     self._quick_put = self._inqueue._writer.send
     self._quick_get = self._outqueue._reader.recv
     self._quick_get_ack = self._ackqueue._reader.recv
Example #12
 def _setup_queues(self):
     """
         Set up the SimpleQueue instances used for communication.
     :return:
     """
     BasePool._setup_queues(self)
     self._get_data_queue = SimpleQueue()
     self._require_data_queue = SimpleQueue()
Example #13
class MyPoolwithPipe(BasePool):
    """
        A process pool with pipes: each worker process gets two extra locked pipes, allowing full-duplex data transfer.
    """
    def __init__(self, processes=None):
        """
            Constructor for MyPoolwithPipe
        :param processes: maximum number of processes
        """
        BasePool.__init__(self, processes)

    def _setup_queues(self):
        """
            Set up the SimpleQueue instances used for communication.
        :return:
        """
        BasePool._setup_queues(self)
        self._get_data_queue = SimpleQueue()
        self._require_data_queue = SimpleQueue()

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(
                target=myworker,
                args=(self._inqueue, self._outqueue, self._initializer,
                      self._initargs, self._maxtasksperchild,
                      self._require_data_queue, self._get_data_queue))
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def send_data(self, data):
        """
            Send data over the pipe.
        :param data: initialization dict for the data-exchange class
        :return:
        """
        self._get_data_queue.put(DataExchange(data['head'], data['data'])())

    def get_data(self):
        """
            Get a data request from a worker process in the pool.
        :return: the requested data
        """
        return self._require_data_queue.get()

    def set_stop(self):
        """
            Shut down the data-serving process.
        :return:
        """
        self._require_data_queue.put(-1)
Example #14
File: logs.py Project: bmer/proofor
class Logger(object):
    def __init__(self, logfilepath):
        try:
            os.remove(logfilepath)
        except OSError:
            pass
        
        self.logfilepath = logfilepath
        self.logq = SimpleQueue()
        
        self.tags = ''
        self.num_tags = 0
        
    def add_tag(self, tag):
        #self.log("adding tag {}".format(tag))
        self.num_tags += 1
        
        if self.tags != '':
            self.tags = self.tags + '.' + tag
        else:
            self.tags = tag
            
    def remove_tag(self):
        #self.log("removing tag")
        tags = self.tags.split('.')
        self.tags = ".".join(tags[:-1])
        self.num_tags -= 1
        
    def get_tag_part(self):
        if self.tags != '':
            return self.tags + ": "
        else:
            return ''
        
    def log(self, message, start_group=None, end_group=None):
        assert(type(message)==str)        
        self.logq.put(" "*self.num_tags*4 + self.get_tag_part() + message + '\n')
            
    def getlog(self):
        return self.logq.get()
            
    def getlogs(self, n=None):
        logs = []
        if n == None:
            while not self.logq.empty():
                logs.append(self.getlog())
        else:
            assert(type(n)==int)
            while not (self.logq.empty() or len(logs) == n):
                logs.append(self.getlog())
                
        return logs
        
    def write_to_file(self):        
        # mode 'a' for append
        with open(self.logfilepath, 'a') as f:
            f.writelines(self.getlogs())
Example #15
def test_simple_queue():
    q = SimpleQueue()
    input_ = [1, 2, 3, 4, 5, 6]
    from_iterable(consumers.to_simple_queue(q), input_)

    for i in input_:
        o = q.get()
        assert o == i

    assert q.empty()
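
A minimal companion sketch (not from the project above; the helper name _drain is purely illustrative) showing that the same FIFO ordering holds when the consumer runs in a separate process:

from multiprocessing import Process, SimpleQueue

def _drain(q, expected):
    # Child process: items must arrive in the order they were put.
    for value in expected:
        assert q.get() == value

if __name__ == "__main__":
    q = SimpleQueue()
    items = [1, 2, 3, 4, 5, 6]
    consumer = Process(target=_drain, args=(q, items))
    consumer.start()
    for value in items:
        q.put(value)
    consumer.join()
    assert consumer.exitcode == 0
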
Example #16
File: test_url.py Project: batmanWjw/w3af
    def test_can_pickle_via_queue(self):
        """
        https://github.com/andresriancho/w3af/issues/8748
        """
        sq = SimpleQueue()
        u1 = URL('http://www.w3af.com/')
        sq.put(u1)
        u2 = sq.get()

        self.assertEqual(u1, u2)
Example #17
    def test_can_pickle_via_queue(self):
        """
        https://github.com/andresriancho/w3af/issues/8748
        """
        sq = SimpleQueue()
        u1 = URL('http://www.w3af.com/')
        sq.put(u1)
        u2 = sq.get()

        self.assertEqual(u1, u2)
Example #18
def main():
    sfile = settings.BIG_FILE
    fsize = os.path.getsize(sfile)
    with  open(sfile, "r") as fh:
        chunks = size_chunks(fh, fsize, num_chunks=settings.BIGFILE_MP_CHUNKS)
    
    # Debug
    #for c in chunks:
        #print(c)
        
    q = Queue()
    pattern = re.compile(settings.TARGET_USERNAME)
    
    # consumer
    #con = multiprocessing.Process(target=opener, args=(cat(grep(pattern, writer())),))
    #con.daemon = True
    #con.start()
    
    # producer
    producers = []
    file_handles = []
    for chunk in chunks:    
        fh = open(sfile, "r")
        file_handles.append(fh)
        o = opener(cat(chunk, grep(pattern, writer(q))))
        t = multiprocessing.Process(target=sender, args=(o,))
        t.daemon = True
        producers.append(t)
        
    for p in producers:
        p.start()
        
    
    for p in producers:
        p.join()
        
    #con.join()
    q.put(None) # sentinel
    
    for f in file_handles:
        f.close()
        
    recsmatch = 0 
    print("Before queue comp")
    while True:
        x = q.get()
        if x == None:
            break
        recsmatch += 1
    print("After queue comp")
        
    
    print("recsmatch={r} chunks={c}".format(r=recsmatch,
                                        c=settings.BIGFILE_MP_CHUNKS))
Example #19
 def _wait_for_updates(change_notifier: SimpleQueue):
     """
     Blocks the calling thread, repeatedly draining items from change_notifier.
     :param change_notifier:
     :return:
     """
     # sentinels, timeout,
     # wait(sentinels, timeout=timeout)
     while not change_notifier.empty():
         res = change_notifier.get()
         logging.debug(f"got signal, content: {res}")
Example #20
    def _setup_queues(self):
        from multiprocessing.queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

        def _poll_result(timeout):
            if self._outqueue._reader.poll(timeout):
                return True, self._quick_get()
            return False, None
        self._poll_result = _poll_result
Example #21
    def setUp(self):

        super(CameraSettingsTestCase,self).setUp()
        self.test_ard_cmd_queue = SimpleQueue()
        self.test_img_cmd_queue = SimpleQueue()
        self.mock_cfg = MockCFG()
        self.mock_nemacquire = MockNemacquire()
        self.camera_settings_widget = CameraSettings(
                                self.mock_nemacquire,
                                self.test_ard_cmd_queue,
                                self.test_img_cmd_queue,
                                self.mock_cfg,
                                250)        
Example #22
def main():
    sfile = settings.BIG_FILE
    fsize = os.path.getsize(sfile)
    with open(sfile, "r") as fh:
        chunks = size_chunks(fh, fsize, num_chunks=settings.BIGFILE_MP_CHUNKS)

    # Debug
    # for c in chunks:
    # print(c)

    q = Queue()
    pattern = re.compile(settings.TARGET_USERNAME)

    # consumer
    # con = multiprocessing.Process(target=opener, args=(cat(grep(pattern, writer())),))
    # con.daemon = True
    # con.start()

    # producer
    producers = []
    file_handles = []
    for chunk in chunks:
        fh = open(sfile, "r")
        file_handles.append(fh)
        o = opener(cat(chunk, grep(pattern, writer(q))))
        t = multiprocessing.Process(target=sender, args=(o,))
        t.daemon = True
        producers.append(t)

    for p in producers:
        p.start()

    for p in producers:
        p.join()

    # con.join()
    q.put(None)  # sentinel

    for f in file_handles:
        f.close()

    recsmatch = 0
    print("Before queue comp")
    while True:
        x = q.get()
        if x == None:
            break
        recsmatch += 1
    print("After queue comp")

    print("recsmatch={r} chunks={c}".format(r=recsmatch, c=settings.BIGFILE_MP_CHUNKS))
Example #23
File: quiver.py Project: lfdebrux/n_bodies
def QuiverPlotter(num):
	data_q = SimpleQueue()

	plot = Process(target=quiverPlotter,args=(data_q,num))
	plot.start()

	try:
		while True:
			data = (yield)
			if data_q.empty() == False:
				continue
			data_q.put(data)
	except GeneratorExit:
		plot.join()
Example #24
    def _terminate_pool(_task_queue: SimpleQueue, _in_queue: SimpleQueue,
                        out_queue: SimpleQueue, pool: List[Process],
                        change_notifier: SimpleQueue,
                        worker_handler_th: Thread, handle_task_th: Thread,
                        handle_result_th: Thread):
        """
        Terminate the process pool.
        :param _task_queue: currently unused
        :param _in_queue: currently unused
        :param out_queue: used to tell worker processes to finish
        :param pool: the process pool
        :param change_notifier: notifies state changes
        :param worker_handler_th: worker-management thread
        :param handle_task_th: task-management thread
        :param handle_result_th: result-management thread
        :return:
        """
        worker_handler_th._state = State.TERMINATE
        handle_task_th._state = State.TERMINATE

        assert handle_result_th.is_alive(), "result handler not alive"
        handle_result_th._state = State.TERMINATE

        # Send the termination signals
        change_notifier.put(EndSignal.END)
        out_queue.put(EndSignal.END)

        # Wait for the worker-monitoring thread to exit
        if threading.current_thread() != worker_handler_th:
            worker_handler_th.join()

        # Send a termination signal to every process in the pool
        if pool:
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        # Wait for the task-handling thread to exit
        if threading.current_thread() != handle_task_th:
            handle_task_th.join()

        # Wait for the result-handling thread to exit
        if threading.current_thread() != handle_result_th:
            handle_result_th.join()

        # Wait for all remaining live processes to exit
        if pool:
            for p in pool:
                if p.is_alive():
                    p.join()
Example #25
    def _wrapped_function(self, process_idx: int, function: Callable,
                          args: Any, kwargs: Any,
                          return_queue: SimpleQueue) -> None:
        self._worker_setup(process_idx)
        result = function(*args, **kwargs)
        if self.local_rank == 0:
            return_queue.put(move_data_to_device(result, "cpu"))

        # https://github.com/pytorch/xla/issues/1801#issuecomment-602799542
        self.barrier("end-process")

        # Ensure that the rank 0 process is the one exiting last
        # https://github.com/pytorch/xla/issues/2190#issuecomment-641665358
        if self.local_rank == 0:
            time.sleep(2)
Example #26
File: points.py Project: lfdebrux/n_bodies
def Plotter3D(plots,scale):
	data_q = SimpleQueue()

	plot = Process(target=plotter3D,args=(data_q,plots,scale))
	plot.start()

	data = {}
	try:
		while True:
			data.update((yield))
			if data_q.empty() == False:
				continue
			data_q.put(data)
	except GeneratorExit:
		pass
Example #27
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = SimpleQueue()
            self.data_queue = SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                multiprocessing.Process(target=_worker_loop,
                                        args=(self.dataset, self.index_queue,
                                              self.data_queue,
                                              self.collate_fn))
                for _ in range(self.num_workers)
            ]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            if self.pin_memory:
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(target=_pin_memory_loop,
                                                   args=(in_data,
                                                         self.data_queue,
                                                         self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            if hasattr(self.dataset, 'build'):
                # Run the build method for the dataset
                self.dataset.build()
Example #28
    def __init__(self, key, task_group, randomize):
        self.key = key
        self.gen_worker = task_group['gen_worker']
        self.task_ids = task_group['task_ids']
        self.is_parallel = task_group['is_parallel']
        if self.is_parallel:
            self.randomize = randomize
            if self.randomize:
                random.shuffle(self.task_ids)
        else:
            self.randomize = False
        self.result_queue = SimpleQueue()
        self.task_queue = SimpleQueue()

        # Don't expose queues file descriptors over Popen to, say, tarantool
        # running tests.
        set_fd_cloexec(self.result_queue._reader.fileno())
        set_fd_cloexec(self.result_queue._writer.fileno())
        set_fd_cloexec(self.task_queue._reader.fileno())
        set_fd_cloexec(self.task_queue._writer.fileno())

        for task_id in self.task_ids:
            self.task_queue.put(task_id)
        self.worker_ids = set()
        self.done = False
        self.done_task_ids = set()
Example #29
File: pageblock.py Project: wyl-hit/job
def main(data):
    import sys
    from multiprocessing.queues import SimpleQueue
    url_list = []
    try:
        req = urllib2.Request(data[1])
        #res_data = urllib2.urlopen(req)
        kk = 1
    except:
        kk = 0
    if kk == 1:
        url_list.append(data[1])
        queue = SimpleQueue()
        message_type = "test"
        app = QApplication(sys.argv)
        crawler = Crawler(url_list, queue, message_type)
        crawler.crawler_start()

        sys.exit(app.exec_())
        print crawler.title
    else:
        file_name = data[1].replace("/", "_")
        file_object = open(file_name, 'w')
        file_object.write("/*/")
        file_object.close()
Example #30
    def __init__(self, data_structure, processes, scan_function, init_args, _mp_init_function):
        """ Init the scanner.

        data_structure is a world.DataSet
        processes is the number of child processes to use
        scan_function is the function to use for scanning
        init_args are the arguments passed to the init function
        _mp_init_function is the function used to init the child processes
        """
        assert (isinstance(data_structure, world.DataSet))
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes, initializer=_mp_init_function, initargs=(init_args,))

        # TODO: make this automatic amount
        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None
Example #31
File: logs.py Project: bmer/proofor
class StatusTracker(object):
    def __init__(self):
        self.logq = SimpleQueue()
        self.history = []
        
    def put(self, msg):
        assert(type(msg)==str)
        self.logq.put(msg)
        
    def flushq(self):
        while not self.logq.empty():
            self.history.append(self.logq.get())
        self.prune_history()
            
    def prune_history(self):
        self.history = self.history[-100:]
Example #32
def multi_process_list_with_consumer(data, method, consumerObj, numProcessors,
                                     *args):
    if numProcessors > len(data):
        numProcessors = len(data)
    dataSplit = split_into_sublist(data, numProcessors)
    processes = [None] * numProcessors
    results = [None] * len(data)
    tempRes = SimpleQueue()
    for i in xrange(numProcessors):
        newArgs = (dataSplit[i], ) + args + (tempRes, )
        processes[i] = multiprocessing.Process(target=method, args=newArgs)
        processes[i].start()

    lastPercent = 0
    dataLen = len(data)
    startTime = time.time()
    get = tempRes.get
    for i in xrange(len(results)):
        consumerObj.process(get())
        percentDone = i / dataLen
        if percentDone - lastPercent >= 0.1:
            timeTaken = time.time() - startTime
            timeRemain = short_format_time(
                (dataLen - (i + 1)) / ((i + 1) / timeTaken))
            timeTaken = short_format_time(timeTaken)
            print int(
                percentDone * 100
            ), "percent done | time elapsed:", timeTaken, "| time remaining:", timeRemain
            lastPercent = percentDone
    for p in processes:
        p.join()
    if lastPercent != 100:
        print "100 percent done"
    print "Finished parallel processing list of length", len(results)
    return consumerObj.results()
Example #33
    def __call__(self, out_queue: SimpleQueue, cache):
        while True:
            res = out_queue.get()
            if res is EndSignal.END:
                break
            logging.debug(f"[PytestResultHandler] result `{res}`")

        logging.debug("[PytestResultHandler] exiting")
Example #34
    def _wrapping_function(
        self,
        process_idx: int,
        trainer: Optional["pl.Trainer"],
        function: Callable,
        args: Any,
        kwargs: Any,
        return_queue: SimpleQueue,
    ) -> None:
        self._strategy._worker_setup(process_idx)
        results = function(*args, **kwargs)

        if trainer is not None:
            results = self._collect_rank_zero_results(trainer, results)

        if self._strategy.local_rank == 0:
            return_queue.put(move_data_to_device(results, "cpu"))
Example #35
def async_file_reading(fd, callback):
    """Helper which instantiate and run an AsynchronousFileReader."""
    queue = SimpleQueue()
    reader = AsynchronousFileReader(fd, queue)
    reader.start()
    consummer = Process(target=consume_queue, args=(queue, callback))
    consummer.start()
    return (reader, consummer)
Example #36
    def __call__(self, task_queue: SimpleQueue, pool: List[Process],
                 in_queue: SimpleQueue, out_queue: SimpleQueue, cache):
        cur_th = threading.current_thread()

        while True:
            if cur_th._state != State.RUN:
                logging.debug('task handler found thread._state != RUN')
                break
            task = task_queue.get()
            if task is EndSignal.END:
                logging.debug("got exit signal")
                break
            assert isinstance(task, Task), "task must implement Task class"
            try:
                in_queue.put(task)
            except Exception as e:
                logging.error(e)
Example #37
 def __init__(self, max_workers=None):
     _check_system_limits()
     if max_workers is None:
         self._max_workers = multiprocessing.cpu_count()
     else:
         self._max_workers = max_workers
     self._call_queue = multiprocessing.Queue(self._max_workers +
                                              EXTRA_QUEUED_CALLS)
     self._call_queue._ignore_epipe = True
     self._result_queue = SimpleQueue()
     self._work_ids = queue.Queue()
     self._queue_management_thread = None
     self._processes = {}
     self._shutdown_thread = False
     self._shutdown_lock = threading.Lock()
     self._broken = False
     self._queue_count = 0
     self._pending_work_items = {}
Example #38
 def __init__(self, key, task_group, randomize):
     self.key = key
     self.gen_worker = task_group['gen_worker']
     self.task_ids = task_group['task_ids']
     self.is_parallel = task_group['is_parallel']
     if self.is_parallel:
         self.randomize = randomize
         if self.randomize:
             random.shuffle(self.task_ids)
     else:
         self.randomize = False
     self.result_queue = SimpleQueue()
     self.task_queue = SimpleQueue()
     for task_id in self.task_ids:
         self.task_queue.put(task_id)
     self.worker_ids = set()
     self.done = False
     self.done_task_ids = set()
Example #39
def launch_graph_plot():
    q = SimpleQueue()
    Pyro4.config.HOST="10.1.1.2"
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    p = Process(target=_launch_daemon, args=(daemon, q,))
    p.start()
    graph_plot = GraphPlotPanel()
    while True:
        if not q.empty():
            item = q.get()
            if item[0] == 'time':
                print "got queue:", item
                graph_plot.set_time(item[1])
            elif item[0] == 'vertex_color':
                pass
        graph_plot.run()
        fpsClock.tick(60)
Example #40
File: server.py Project: ycaihua/MacRanger
class RangerControlServer(HTTPServer):
    def __init__(self, fm):
        self.fm = fm
        self.queue = SimpleQueue()
        self.goDie = False
        HTTPServer.__init__(self, ("127.0.0.1", 5964), RangerControlHandler)

    def start(self):
        self.thread = threading.Thread(target=self.process)
        self.thread.start()

    def stop(self):
        self.shutdown()

    def process(self):
        self.serve_forever()

    def check_messages(self):
        if self.queue.empty():
            return None
        return self.queue.get()

    def act_on_messages(self):
        msg = self.check_messages()
        if msg == None:
            return False

        action, arg = msg
        match = re.match(r"/cdtab-(\S+)", action)
        if match != None:
            tab = match.group(1)
            if not (tab in self.fm.tabs):
                self.fm.tab_open(tab, arg)
            else:
                self.fm.tabs[tab].enter_dir(arg)
        elif action == "/cd":
            self.fm.enter_dir(arg)
        elif action == "/cdfirst":
            first_tab = self.fm._get_tab_list()[0]
            self.fm.tabs[first_tab].enter_dir(arg)
        else:
            self.fm.notify("Unknown server command", bad=True)
        return True
Example #41
class TableInterface:
    def __init__(self):
        self.input_interface = Queue()
        self.output_interface = None

    def put(self, data):
        self.output_interface.put(data)

    def get(self):
        return self.input_interface.get()
Example #42
def DensityPlotter(num,size):
	# num = size/scale
	range = [[-size,size],[-size,size]]

	data_q = SimpleQueue()

	plot = Process(target=imagedraw,args=(data_q,num))
	plot.start()

	while True:
		x = (yield)

		if data_q.empty() == False:
			continue

		hist,_,_ = np.histogram2d(x[:,0],x[:,1],bins=num,range=range)
		avg = np.average(hist)
		hist = (hist - avg)/avg
		data_q.put(hist.astype(np.float32))
Example #43
    def __init__(self, instructions, tables):
        self.instructions = instructions
        self.tables = tables

        self._input_interface = None
        self._output_interface = Queue()
        self._instructions = {}
        self._is_setup = False
        self._is_start = False

        self._setup()
Example #44
File: logs.py Project: bmer/proofor
 def __init__(self, logfilepath):
     try:
         os.remove(logfilepath)
     except OSError:
         pass
     
     self.logfilepath = logfilepath
     self.logq = SimpleQueue()
     
     self.tags = ''
     self.num_tags = 0
Example #45
File: line.py Project: lfdebrux/n_bodies
class LinePlotter:
	def __init__(self,*args,**kwargs):
		self.data_q = SimpleQueue()
		self.data = {}

		self.plot = LinePlotterProcess(self.data_q)
		self.plot.add_plot(*args,**kwargs)

	def show(self):
		self.plot.start()

	def add_plot(self,*args,**kwargs):
		self.plot.add_plot(*args,**kwargs)

	def send(self,data):
		if data == GeneratorExit:
			self.plot.join()

		self.data.update(data)
		if self.data_q.empty() != False:
			self.data_q.put(data)
Example #46
    def __init__(self, instruction, tables):
        self._instruction = instruction
        self._tables = tables
        self.input_interface = Queue()
        self.output_interfaces = {}

        self._instruction_pipelines = []
        self._is_atomic_enabled = False
        self._is_sequential_enabled = False
        self._is_concurrent_enabled = False

        self._setup()
Example #47
class ErrorMonitor:
    def __init__(self):
        self.pipe = SimpleQueue()
        self.message = None

    def main(self):
        while True:
            message = self.pipe.get()
            if message != 'Q':
                self.message = message[1:]
                LongJump.longjump()
                break
            else:
                self.pipe = None
                break
                    
    def haserror(self):
        """ master only """
        return self.message is not None
    def start(self):
        """ master only """
        self.thread = Thread(target=self.main)
        self.thread.daemon = True
        self.thread.start()
    def join(self):
        """ master only """
        try:
            self.pipe.put('Q')
            self.thread.join()
        except:
            pass
        finally:
            self.thread = None

    def slaveraise(self, type, error, traceback):
        """ slave only """
        message = 'E' * 1 + pickle.dumps((type,
            ''.join(tb.format_exception(type, error, traceback))))
        if self.pipe is not None:
            self.pipe.put(message)
Example #48
def main():
    global TCP_SEND_PORT
    global TCP_SEND_IP
    global TCP_RECEIVE_IP
    global TCP_RECEIVE_PORT
    global key_store
    global eventual_requests
    global eventual_write_lock
    global eventual_read_lock
    key_store = {}
    eventual_requests = {}
    eventual_write_lock = threading.Lock()
    eventual_read_lock = threading.Lock()
    signal.signal(signal.SIGINT, signal_handler)
    TCP_RECEIVE_IP = TCP_SEND_IP = socket.gethostbyname(socket.gethostname())
    TCP_SEND_PORT = int(sys.argv[1])
    TCP_RECEIVE_PORT = int(sys.argv[2])
    BUFFER_SIZE = 1024
    listener = threading.Thread(target=listening_thread, args=[BUFFER_SIZE])
    listener.daemon = True
    listener.start()
    message_queue = SimpleQueue()
    worker = threading.Thread(target=worker_thread, args=[message_queue])
    worker.daemon = True
    worker.start()

    while 1:
        command = str(raw_input(bcolors.HEADER +  bcolors.UNDERLINE + "Enter Message:\n" + bcolors.ENDC))
        messages = []
        if command.endswith('.txt'):
            messages = readFile(command)
        else:
            messages.append(command)
        message_queue.put(messages)
        print bcolors.OKBLUE +  'System time is ' + \
                str(datetime.datetime.now().strftime("%H:%M:%S:%f")) + bcolors.ENDC
Example #49
File: arbiter.py Project: wong2/larus
    def setup(self):
        self.pid = os.getpid()
        self.worker_nums = self.config['workers']
        self.worker_class = SyncWorker
        self.queue = SimpleQueue()
        self.setup_logger()
        self.setup_signals()

        addresses = self.config['binds']
        self.sockets = create_sockets(addresses, self.logger)

        addresses_str = ', '.join(map(format_addr_str, addresses))
        self.logger.info('Arbiter booted')
        self.logger.info('Listening on: %s (%s)', addresses_str, self.pid)
        self.logger.info('Using worker: %s', self.worker_class)
Example #50
 def __init__(self, max_workers=None):
     _check_system_limits()
     if max_workers is None:
         self._max_workers = multiprocessing.cpu_count()
     else:
         self._max_workers = max_workers
     self._call_queue = multiprocessing.Queue(self._max_workers + EXTRA_QUEUED_CALLS)
     self._call_queue._ignore_epipe = True
     self._result_queue = SimpleQueue()
     self._work_ids = queue.Queue()
     self._queue_management_thread = None
     self._processes = {}
     self._shutdown_thread = False
     self._shutdown_lock = threading.Lock()
     self._broken = False
     self._queue_count = 0
     self._pending_work_items = {}
Example #51
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
Example #52
class BaseMultiprocessingRunner(BaseRunner):
    def __init__(self):
        super(BaseMultiprocessingRunner, self).__init__()
        self.numprocs = max(multiprocessing.cpu_count() - 1, 1)
        self.map_input_queue = SimpleQueue()
        self.map_output_queue = SimpleQueue()

    def run_map(self):
        for item in iter(self.map_input_queue.get, self.STOP_MSG):
            self.job.map(item, self.map_output_queue.put)
        self.map_output_queue.put(self.STOP_MSG)
        if self.debug:
            debug_print("Output : STOP sent")

    def run_enumerate(self):
        for inp in self.job.enumerate():
            self.map_input_queue.put(inp)
        for work in range(self.numprocs):
            self.map_input_queue.put(self.STOP_MSG)
        if self.debug:
            debug_print("Input: STOP sent")

    def run(self, job):
        self.job = job
        # Process that reads the input file
        self.enumeration_process = multiprocessing.Process(target=self.run_enumerate, args=())

        self.mappers = [multiprocessing.Process(target=self.run_map, args=()) for i in range(self.numprocs)]

        self.enumeration_process.start()
        for mapper in self.mappers:
            mapper.start()
        r = self.run_reduce()
        self.enumeration_process.join()
        for mapper in self.mappers:
            mapper.join()
        return r
Example #53
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
Example #54
    def _handle_ASes(self):
        """Spawns several processes (based on the available CPUs) to handle the
        AS resolving and creates the necessary objects based on the results.
        """
        # Gather all the ASNs seen through filter and recursive resolving.
        all_ASNs = list((self.recursed_ASes | self.AS_list) - self.black_list)
        all_ASNs_count = len(all_ASNs)
        if all_ASNs_count < 1:
            return

        # We will devote all but one core to resolving since the main process
        # will handle the objects' creation.
        number_of_resolvers = mp.cpu_count() - 1
        if number_of_resolvers < 1:
            number_of_resolvers = 1

        # The list of ASNs is going to be distributed almost equally to the
        # available resolvers.
        if all_ASNs_count >= number_of_resolvers:
            slice_length = int(math.ceil(all_ASNs_count / float(number_of_resolvers)))
        else:
            number_of_resolvers = all_ASNs_count
            slice_length = 1

        result_q = SimpleQueue()  # NOTE: Only works with this queue.
        processes = []
        slice_start = 0
        for i in xrange(number_of_resolvers):
            ASN_batch = all_ASNs[slice_start:slice_start+slice_length]
            processes.append(mp.Process(target=_subprocess_AS_resolving, args=(ASN_batch, result_q)).start())
            slice_start += slice_length

        # PROGRESS START
        # Show progress while running.
        # Can be safely commented out until PROGRESS END.
        aps_count = 0
        aps = 0
        time_start = time.time()
        # PROGRESS END

        done = 0
        while done < all_ASNs_count:
            try:
                asn, routes = result_q.get()
            except Empty:
                # This should never be reached with this queue but left here
                # just in case.
                time.sleep(0.2)
                continue

            # If the AS has routes create the appropriate ASN object and add it
            # to the data pool.
            if routes is not None and (routes['ipv4'] or routes['ipv6']):
                ASN_object = rpsl.ASObject(asn)
                for prefix in routes['ipv4']:
                    route_object = rpsl.RouteObject(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route_object)
                for prefix in routes['ipv6']:
                    route6_object = rpsl.Route6Object(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route6_object)
                self.AS_dir.append_ASN_obj(ASN_object)
            done += 1

        # PROGRESS START
        # Show progress while running.
        # Can be safely commented out until PROGRESS END.
            aps_count += 1
            time_diff = time.time() - time_start
            if time_diff >= 1:
                aps = aps_count / time_diff
                aps_count = 0
                time_start = time.time()
            sys.stdout.write("{} of {} ASes | {:.0f} ASes/s          \r"
                             .format(done, all_ASNs_count, aps))
            sys.stdout.flush()
        print
Example #55
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            if not error_queue.empty():
                exit_event.set()
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        error_queue.put((RuntimeError, RuntimeError("Error: Items remaining in the task queue"), None))

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Example #56
File: queues.py Project: 0x554simon/w3af
 def __init__(self):
     SimpleQueue.__init__(self)
     self._qsize = 0
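
The excerpt above only shows the constructor; multiprocessing's SimpleQueue exposes no qsize(), so the idea is to track the count manually. A minimal self-contained sketch of that pattern, assuming Python 3 and using class and attribute names that are illustrative rather than w3af's:

from multiprocessing import SimpleQueue

class CountingSimpleQueue(object):
    """Wrap a SimpleQueue and keep a local item counter."""

    def __init__(self):
        self._queue = SimpleQueue()
        self._qsize = 0  # only counts puts/gets made from this process

    def put(self, item):
        self._queue.put(item)
        self._qsize += 1

    def get(self):
        item = self._queue.get()
        self._qsize -= 1
        return item

    def qsize(self):
        return self._qsize
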
Example #57
def magic_memit(self, line=''):
    """Measure memory usage of a Python statement

    Usage, in line mode:
      %memit [-ir<R>t<T>] statement

    Options:
    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 1

    -i: run the code in the current environment, without forking a new process.
    This is required on some MacOS versions of Accelerate if your line contains
    a call to `np.dot`.

    -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None

    Examples
    --------
    ::

      In [1]: import numpy as np

      In [2]: %memit np.zeros(1e7)
      maximum of 1: 76.402344 MB per loop

      In [3]: %memit np.ones(1e6)
      maximum of 1: 7.820312 MB per loop

      In [4]: %memit -r 10 np.empty(1e8)
      maximum of 10: 0.101562 MB per loop

      In [5]: %memit -t 3 while True: pass;
      Subprocess timed out.
      Subprocess timed out.
      Subprocess timed out.
      ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
      option.
      maximum of 1: -inf MB per loop

    """
    opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False)
    repeat = int(getattr(opts, 'r', 1))
    if repeat < 1:
        repeat = 1
    timeout = int(getattr(opts, 't', 0))
    if timeout <= 0:
        timeout = None
    run_in_place = hasattr(opts, 'i')

    # Don't depend on multiprocessing:
    try:
        import multiprocessing as pr
        from multiprocessing.queues import SimpleQueue
        q = SimpleQueue()
    except ImportError:
        class ListWithPut(list):
            "Just a list where the `append` method is aliased to `put`."
            def put(self, x):
                self.append(x)
        q = ListWithPut()
        print ('WARNING: cannot import module `multiprocessing`. Forcing the '
               '`-i` option.')
        run_in_place = True

    ns = self.shell.user_ns

    if run_in_place:
        for _ in xrange(repeat):
            _get_usage(q, stmt, ns=ns)
    else:
        # run in consecutive subprocesses
        at_least_one_worked = False
        for _ in xrange(repeat):
            p = pr.Process(target=_get_usage, args=(q, stmt, 'pass', ns))
            p.start()
            p.join(timeout=timeout)
            if p.exitcode == 0:
                at_least_one_worked = True
            else:
                p.terminate()
                if p.exitcode == None:
                    print('Subprocess timed out.')
                else:
                    print('Subprocess exited with code %d.' % p.exitcode)
                q.put(float('-inf'))

        if not at_least_one_worked:
            print ('ERROR: all subprocesses exited unsuccessfully. Try again '
                   'with the `-i` option.')

    usages = [q.get() for _ in xrange(repeat)]
    usage = max(usages)
    print('maximum of %d: %f MB per loop' % (repeat, usage))
Example #58
File: server.py Project: ycaihua/MacRanger
 def __init__(self, fm):
     self.fm = fm
     self.queue = SimpleQueue()
     self.goDie = False
     HTTPServer.__init__(self, ("127.0.0.1", 5964), RangerControlHandler)