Code example #1
File: pool.py Project: VenmoTools/test-tool-box
    def __call__(self, in_queue: SimpleQueue, out_queue: SimpleQueue,
                 init_args, wrap_exception, *args, **kwargs):
        if init_args:
            self.initializer(init_args)
        while True:
            try:
                logging.debug("waiting recv task")
                task = in_queue.get()
                logging.debug("task received")
            except (EOFError, OSError):
                logging.debug('worker got EOFError or OSError -- exiting')
                break
            if task is None:
                logging.debug('worker got sentinel -- exiting')
                break
            p_args = task.args()
            if isinstance(p_args, Tuple):
                args_l, plugins = p_args
                exit_code = self._main(args_l, plugins)
            else:
                exit_code = self._main(p_args)
            try:
                out_queue.put(exit_code)
            except Exception as e:
                out_queue.put(e)
Code example #2
File: pool.py Project: VenmoTools/test-tool-box
    def __call__(self, out_queue: SimpleQueue, cache):
        while True:
            res = out_queue.get()
            if res is EndSignal.END:
                break
            logging.debug(f"[PytestResultHandler] result `{res}`")

        logging.debug("[PytestResultHandler] exiting")
Code example #3
class MyPoolwithPipe(BasePool):
    """
        带管道的进程池类,为每个进程额外添加了两个带锁的管道,可以时间双工的数据传输
    """
    def __init__(self, processes=None):
        """
            MyPoolwithPipe的构造函数
        :param processes: 最大进程数
        """
        BasePool.__init__(self, processes)

    def _setup_queues(self):
        """
            设定用于通信的SimpleQueue
        :return:
        """
        BasePool._setup_queues(self)
        self._get_data_queue = SimpleQueue()
        self._require_data_queue = SimpleQueue()

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(
                target=myworker,
                args=(self._inqueue, self._outqueue, self._initializer,
                      self._initargs, self._maxtasksperchild,
                      self._require_data_queue, self._get_data_queue))
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def send_data(self, data):
        """
            向管道传送数据
        :param data: 数据交换类的初始化字典
        :return:
        """
        self._get_data_queue.put(DataExchange(data['head'], data['data'])())

    def get_data(self):
        """
            获得进程池内进程的数据请求
        :return: 请求的数据
        """
        return self._require_data_queue.get()

    def set_stop(self):
        """
            关闭数据服务进程
        :return:
        """
        self._require_data_queue.put(-1)
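
A possible parent-side data-service loop for the pool above, sketched from its API alone; it assumes MyPoolwithPipe and its BasePool/myworker/DataExchange dependencies are importable, and the lookup dict is purely illustrative. If set_stop() is called (say, from another thread), get_data() returns the -1 sentinel and the loop exits.

pool = MyPoolwithPipe(processes=4)
lookup = {'user': 'alice'}  # hypothetical data served to workers

while True:
    request = pool.get_data()      # blocks until a worker asks for data
    if request == -1:              # sentinel pushed by set_stop()
        break
    # answer the request through the other queue
    pool.send_data({'head': request, 'data': lookup.get(request)})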
Code example #4
File: logs.py Project: bmer/proofor
class Logger(object):
    def __init__(self, logfilepath):
        try:
            os.remove(logfilepath)
        except OSError:
            pass
        
        self.logfilepath = logfilepath
        self.logq = SimpleQueue()
        
        self.tags = ''
        self.num_tags = 0
        
    def add_tag(self, tag):
        #self.log("adding tag {}".format(tag))
        self.num_tags += 1
        
        if self.tags != '':
            self.tags = self.tags + '.' + tag
        else:
            self.tags = tag
            
    def remove_tag(self):
        #self.log("removing tag")
        tags = self.tags.split('.')
        self.tags = ".".join(tags[:-1])
        self.num_tags -= 1
        
    def get_tag_part(self):
        if self.tags != '':
            return self.tags + ": "
        else:
            return ''
        
    def log(self, message, start_group=None, end_group=None):
        assert isinstance(message, str)
        self.logq.put(" "*self.num_tags*4 + self.get_tag_part() + message + '\n')
            
    def getlog(self):
        return self.logq.get()
            
    def getlogs(self, n=None):
        logs = []
        if n is None:
            while not self.logq.empty():
                logs.append(self.getlog())
        else:
            assert isinstance(n, int)
            while not (self.logq.empty() or len(logs) == n):
                logs.append(self.getlog())
                
        return logs
        
    def write_to_file(self):        
        # mode 'a' for append
        with open(self.logfilepath, 'a') as f:
            f.writelines(self.getlogs())
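
A minimal usage sketch for this Logger (the path is hypothetical): tags indent and prefix later messages, and write_to_file drains the queue to disk.

logger = Logger('/tmp/run.log')   # hypothetical path

logger.log('starting run')
logger.add_tag('phase1')
logger.log('inside phase1')       # queued as "    phase1: inside phase1\n"
logger.remove_tag()

logger.write_to_file()            # drains the queue, appends to the file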
Code example #5
class TableInterface:
    def __init__(self):
        self.input_interface = Queue()
        self.output_interface = None

    def put(self, data):
        self.output_interface.put(data)

    def get(self):
        return self.input_interface.get()
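
The asymmetry above (an interface owns its input queue but borrows its output) exists so interfaces can be cross-wired; a sketch of connecting two of them so one side's put feeds the other side's get, the same wiring Pipeline performs in example #27.

a = TableInterface()
b = TableInterface()

# Cross-wire the ends: what `a` puts, `b` gets, and vice versa.
a.output_interface = b.input_interface
b.output_interface = a.input_interface

a.put('lookup request')
print(b.get())  # -> 'lookup request'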
Code example #7
File: test_consumers.py Project: grimborg/poultry
def test_simple_queue():
    q = SimpleQueue()
    input_ = [1, 2, 3, 4, 5, 6]
    from_iterable(consumers.to_simple_queue(q), input_)

    for i in input_:
        o = q.get()
        assert o == i

    assert q.empty()
Code example #8
File: test_url.py Project: batmanWjw/w3af
    def test_can_pickle_via_queue(self):
        """
        https://github.com/andresriancho/w3af/issues/8748
        """
        sq = SimpleQueue()
        u1 = URL('http://www.w3af.com/')
        sq.put(u1)
        u2 = sq.get()

        self.assertEqual(u1, u2)
Code example #9
def main():
    sfile = settings.BIG_FILE
    fsize = os.path.getsize(sfile)
    with open(sfile, "r") as fh:
        chunks = size_chunks(fh, fsize, num_chunks=settings.BIGFILE_MP_CHUNKS)
    
    # Debug
    #for c in chunks:
        #print(c)
        
    q = Queue()
    pattern = re.compile(settings.TARGET_USERNAME)
    
    # consumer
    #con = multiprocessing.Process(target=opener, args=(cat(grep(pattern, writer())),))
    #con.daemon = True
    #con.start()
    
    # producer
    producers = []
    file_handles = []
    for chunk in chunks:    
        fh = open(sfile, "r")
        file_handles.append(fh)
        o = opener(cat(chunk, grep(pattern, writer(q))))
        t = multiprocessing.Process(target=sender, args=(o,))
        t.daemon = True
        producers.append(t)
        
    for p in producers:
        p.start()
        
    
    for p in producers:
        p.join()
        
    #con.join()
    q.put(None) # sentinel
    
    for f in file_handles:
        f.close()
        
    recsmatch = 0 
    print("Before queue comp")
    while True:
        x = q.get()
        if x is None:
            break
        recsmatch += 1
    print("After queue comp")
        
    
    print("recsmatch={r} chunks={c}".format(r=recsmatch,
                                        c=settings.BIGFILE_MP_CHUNKS))
Code example #10
File: pool.py Project: VenmoTools/test-tool-box
    def _wait_for_updates(change_notifier: SimpleQueue):
        """
        This method blocks the thread, continuously taking items out of
        change_notifier
        :param change_notifier:
        :return:
        """
        # sentinels, timeout,
        # wait(sentinels, timeout=timeout)
        while not change_notifier.empty():
            res = change_notifier.get()
            logging.debug(f"got signal, content: {res}")
Code example #12
File: logs.py Project: bmer/proofor
class StatusTracker(object):
    def __init__(self):
        self.logq = SimpleQueue()
        self.history = []
        
    def put(self, msg):
        assert(type(msg)==str)
        self.logq.put(msg)
        
    def flushq(self):
        while not self.logq.empty():
            self.history.append(self.logq.get())
        self.prune_history()
            
    def prune_history(self):
        self.history = self.history[-100:]
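
A short usage sketch: messages pile up in the SimpleQueue until flushq moves them into history, which prune_history caps at the last 100 entries.

tracker = StatusTracker()
for i in range(3):
    tracker.put('step %d done' % i)

tracker.flushq()            # drain the queue into history (capped at 100)
print(tracker.history[-1])  # -> 'step 2 done'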
Code example #13
File: pool.py Project: VenmoTools/test-tool-box
    def __call__(self, task_queue: SimpleQueue, pool: List[Process],
                 in_queue: SimpleQueue, out_queue: SimpleQueue, cache):
        cur_th = threading.current_thread()

        while True:
            if cur_th._state != State.RUN:
                logging.debug('task handler found thread._state != RUN')
                break
            task = task_queue.get()
            if task is EndSignal.END:
                logging.debug("got exit signal")
                break
            assert isinstance(task, Task), "task must implement Task class"
            try:
                in_queue.put(task)
            except Exception as e:
                logging.error(e)
Code example #14
def launch_graph_plot():
    q = SimpleQueue()
    Pyro4.config.HOST="10.1.1.2"
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    p = Process(target=_launch_daemon, args=(daemon, q,))
    p.start()
    graph_plot = GraphPlotPanel()
    while True:
        if not q.empty():
            item = q.get()
            if item[0] == 'time':
                print "got queue:", item
                graph_plot.set_time(item[1])
            elif item[0] == 'vertex_color':
                pass
        graph_plot.run()
        fpsClock.tick(60)
Code example #15
File: server.py Project: ycaihua/MacRanger
class RangerControlServer(HTTPServer):
    def __init__(self, fm):
        self.fm = fm
        self.queue = SimpleQueue()
        self.goDie = False
        HTTPServer.__init__(self, ("127.0.0.1", 5964), RangerControlHandler)

    def start(self):
        self.thread = threading.Thread(target=self.process)
        self.thread.start()

    def stop(self):
        self.shutdown()

    def process(self):
        self.serve_forever()

    def check_messages(self):
        if self.queue.empty():
            return None
        return self.queue.get()

    def act_on_messages(self):
        msg = self.check_messages()
        if msg is None:
            return False

        action, arg = msg
        match = re.match(r"/cdtab-(\S+)", action)
        if match is not None:
            tab = match.group(1)
            if not (tab in self.fm.tabs):
                self.fm.tab_open(tab, arg)
            else:
                self.fm.tabs[tab].enter_dir(arg)
        elif action == "/cd":
            self.fm.enter_dir(arg)
        elif action == "/cdfirst":
            first_tab = self.fm._get_tab_list()[0]
            self.fm.tabs[first_tab].enter_dir(arg)
        else:
            self.fm.notify("Unknown server command", bad=True)
        return True
Code example #18
File: parallel.py Project: StevenLOL/sharedmem
class ErrorMonitor:
    def __init__(self):
        self.pipe = SimpleQueue()
        self.message = None

    def main(self):
        while True:
            message = self.pipe.get()
            if message != 'Q':
                self.message = message[1:]
                LongJump.longjump()
                break
            else:
                self.pipe = None
                break
                    
    def haserror(self):
        """ master only """
        return self.message is not None

    def start(self):
        """ master only """
        self.thread = Thread(target=self.main)
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        """ master only """
        try:
            self.pipe.put('Q')
            self.thread.join()
        except:
            pass
        finally:
            self.thread = None

    def slaveraise(self, type, error, traceback):
        """ slave only """
        message = 'E' * 1 + pickle.dumps((type,
            ''.join(tb.format_exception(type, error, traceback))))
        if self.pipe is not None:
            self.pipe.put(message)
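
A master-side lifecycle sketch for ErrorMonitor, assuming the class and its LongJump/pickle/tb dependencies resolve; risky_work is a hypothetical stand-in for work that would normally run in a slave process.

import sys

monitor = ErrorMonitor()
monitor.start()                  # master: watch the queue on a daemon thread

try:
    risky_work()                 # hypothetical slave-side work
except Exception as error:
    # forward the failure to the master through the queue
    monitor.slaveraise(type(error), error, sys.exc_info()[2])

monitor.join()                   # sends 'Q' and reaps the watcher thread
if monitor.haserror():
    print('slave reported:', monitor.message)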
Code example #19
res_q = SimpleQueue()
end_eval = Event()
for gpu in range(nb_gpus):
    prc_g.append(Process(target=eval,
                         args=(gpu, img_q, res_q, end_eval)))
    prc_g[-1].daemon = True
    prc_g[-1].start()

for i in range(num_t):
    prc_l[i].join()
    print('join prefetch thread %d' % i)
print('prefetch end')

end_eval.set()
print('end eval')

eval_cnt = 0
c1, c5 = 0, 0
for gpu in range(nb_gpus):
    print('join gpu %d' % gpu)
    prc_g[gpu].join()
    eval_res = res_q.get()
    eval_cnt += eval_res.img_cnt
    c1       += eval_res.c1
    c5       += eval_res.c5
assert eval_cnt == img_tlt
print('='*40)
print('top 1: %0.4f\ttop 5: %0.4f' % (c1/img_tlt, c5/img_tlt))
print('='*40)

Code example #20
def magic_memit(self, line=''):
    """Measure memory usage of a Python statement

    Usage, in line mode:
      %memit [-ir<R>t<T>] statement

    Options:
    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 1

    -i: run the code in the current environment, without forking a new process.
    This is required on some MacOS versions of Accelerate if your line contains
    a call to `np.dot`.

    -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None

    Examples
    --------
    ::

      In [1]: import numpy as np

      In [2]: %memit np.zeros(1e7)
      maximum of 1: 76.402344 MB per loop

      In [3]: %memit np.ones(1e6)
      maximum of 1: 7.820312 MB per loop

      In [4]: %memit -r 10 np.empty(1e8)
      maximum of 10: 0.101562 MB per loop

      In [5]: memit -t 3 while True: pass;
      Subprocess timed out.
      Subprocess timed out.
      Subprocess timed out.
      ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
      option.
      maximum of 1: -inf MB per loop

    """
    opts, stmt = self.parse_options(line, 'r:t:i', posix=False, strict=False)
    repeat = int(getattr(opts, 'r', 1))
    if repeat < 1:
        repeat = 1
    timeout = int(getattr(opts, 't', 0))
    if timeout <= 0:
        timeout = None
    run_in_place = hasattr(opts, 'i')

    # Don't depend on multiprocessing:
    try:
        import multiprocessing as pr
        from multiprocessing.queues import SimpleQueue
        q = SimpleQueue()
    except ImportError:
        class ListWithPut(list):
            "Just a list where the `append` method is aliased to `put`."
            def put(self, x):
                self.append(x)
        q = ListWithPut()
        print ('WARNING: cannot import module `multiprocessing`. Forcing the '
               '`-i` option.')
        run_in_place = True

    ns = self.shell.user_ns

    if run_in_place:
        for _ in xrange(repeat):
            _get_usage(q, stmt, ns=ns)
    else:
        # run in consecutive subprocesses
        at_least_one_worked = False
        for _ in xrange(repeat):
            p = pr.Process(target=_get_usage, args=(q, stmt, 'pass', ns))
            p.start()
            p.join(timeout=timeout)
            if p.exitcode == 0:
                at_least_one_worked = True
            else:
                p.terminate()
                if p.exitcode is None:
                    print('Subprocess timed out.')
                else:
                    print('Subprocess exited with code %d.' % p.exitcode)
                q.put(float('-inf'))

        if not at_least_one_worked:
            print ('ERROR: all subprocesses exited unsuccessfully. Try again '
                   'with the `-i` option.')

    usages = [q.get() for _ in xrange(repeat)]
    usage = max(usages)
    print('maximum of %d: %f MB per loop' % (repeat, usage))
Code example #21
class GroupProcess():
    def __init__(self, instruction, tables):
        self._instruction = instruction
        self._tables = tables
        self.input_interface = Queue()
        self.output_interfaces = {}

        self._instruction_pipelines = []
        self._is_atomic_enabled = False
        self._is_sequential_enabled = False
        self._is_concurrent_enabled = False

        self._setup()

    def _setup(self):
        if isinstance(self._instruction, I.ATM):
            self._code = self._instruction.code
            self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
            self._atomic_process = Process(target=self.run_atomic)
            self._is_atomic_enabled = True
        elif isinstance(self._instruction, I.SEQ):
            self._code = self._instruction.code
            self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
            self._sequential_ingress_process = Process(target=self.run_sequential_ingress)
            self._sequential_egress_process = Process(target=self.run_sequential_egress)
            self._metadata_queue = Queue()
            self._is_sequential_enabled = True
        elif isinstance(self._instruction, I.CNC):
            # Note: CNC can't have PUSH/POP instructions in its code blocks. They violate the concurrency invariant.
            self._codes = self._instruction.codes
            self._modified_locations = []
            self._modified_reserved_fields = []
            self._modified_fields = []
            for code in self._codes:
                self._instruction_pipelines.append(Pipeline(code.instructions, self._tables))
                self._modified_locations.append(get_modified_locations(code.instructions))
                self._modified_reserved_fields.append(get_modified_reserved_fields(code.instructions))
                self._modified_fields.append(get_modified_fields(code.instructions, code.argument_fields))
            self._concurrent_ingress_process = Process(target=self.run_concurrent_ingress)
            self._concurrent_egress_process = Process(target=self.run_concurrent_egress)
            self._metadata_queue = Queue()
            self._is_concurrent_enabled = True
        else:
            raise RuntimeError()

    def start(self):
        for instruction_pipeline in self._instruction_pipelines:
            instruction_pipeline.start()

        if self._is_atomic_enabled:
            self._atomic_process.start()
        elif self._is_sequential_enabled:
            self._sequential_ingress_process.start()
            self._sequential_egress_process.start()
        elif self._is_concurrent_enabled:
            self._concurrent_ingress_process.start()
            self._concurrent_egress_process.start()
        else:
            raise RuntimeError()

    def stop(self):
        self.input_interface.put(None)
        for instruction_pipeline in self._instruction_pipelines:
            instruction_pipeline.stop()

        if self._is_atomic_enabled:
            self._atomic_process.join()
        elif self._is_sequential_enabled:
            self._metadata_queue.put(None)
            self._sequential_ingress_process.join()
            self._sequential_egress_process.join()
        elif self._is_concurrent_enabled:
            self._metadata_queue.put(None)
            self._concurrent_ingress_process.join()
            self._concurrent_egress_process.join()
        else:
            raise RuntimeError()

    def run_atomic(self):
        instruction_pipeline = self._instruction_pipelines[0]

        while True:
            try:
                state = self.input_interface.get()

                # print 'atomic_group_process'

                if state is None:
                    return

                ''' Save the current header '''
                header = state.header

                state.header = Header()
                for field in self._code.argument_fields:
                    state.header[field] = header[field]
                for field in get_reserved_fields():
                    state.header[field] = header[field]

                ''' Process the pipeline '''
                instruction_pipeline.put(state)
                state = instruction_pipeline.get()

                ''' Commit changes to the current header '''
                for field in self._code.argument_fields:
                    header[field] = state.header[field]
                for field in get_reserved_fields():
                    header[field] = state.header[field]

                state.header = header

                self.output_interfaces[state.label].put(state)
            except KeyboardInterrupt:
                break

    def run_sequential_ingress(self):
        instruction_pipeline = self._instruction_pipelines[0]

        while True:
            try:
                state = self.input_interface.get()

                # print 'sequential_group_ingress_process'

                if state is None:
                    return

                ''' Save the current header '''
                header = state.header

                self._metadata_queue.put(header)

                state.header = Header()
                for field in self._code.argument_fields:
                    state.header[field] = header[field]
                for field in get_reserved_fields():
                    state.header[field] = header[field]

                instruction_pipeline.put(state)
            except KeyboardInterrupt:
                break

    def run_sequential_egress(self):
        instruction_pipeline = self._instruction_pipelines[0]

        while True:
            try:
                header = self._metadata_queue.get()

                # print 'sequential_group_egress_process'

                if header is None:
                    return

                state = instruction_pipeline.get()

                ''' Commit changes to the original header '''
                for field in self._code.argument_fields:
                    header[field] = state.header[field]
                for field in get_reserved_fields():
                    header[field] = state.header[field]

                state.header = header

                self.output_interfaces[state.label].put(state)
            except KeyboardInterrupt:
                break

    def run_concurrent_ingress(self):
        while True:
            try:
                state = self.input_interface.get()

                # print 'concurrent_group_ingress_process'

                if state is None:
                    return

                ''' Save the current header '''
                header = state.header

                self._metadata_queue.put(state)

                for i in range(len(self._instruction_pipelines)):
                    state.header = Header()
                    for field in self._codes[i].argument_fields:
                        state.header[field] = header[field]
                    for field in get_reserved_fields():
                        state.header[field] = header[field]

                    self._instruction_pipelines[i].put(state)
            except KeyboardInterrupt:
                break

    def run_concurrent_egress(self):
        while True:
            try:
                state = self._metadata_queue.get()

                # print 'concurrent_group_egress_process'

                if state is None:
                    return

                for i in range(len(self._instruction_pipelines)):
                    _state = self._instruction_pipelines[i].get()

                    ''' Commit changes to the original header '''
                    # Note: we assume that fields and locations are unique across different legs of CNC
                    for field in self._modified_fields[i]:
                        state.header[field] = _state.header[field]
                    for field in self._modified_reserved_fields[i]:
                        state.header[field] = _state.header[field]
                    for location in self._modified_locations[i]:
                        offset_value = location.offset.value
                        length_value = location.length.value
                        state.header.packet[offset_value.value:(offset_value.value + length_value.value)] = \
                            _state.packet[offset_value.value:(offset_value.value + length_value.value)]

                self.output_interfaces[state.label].put(state)
            except KeyboardInterrupt:
                break
Code example #22
File: benchmark.py Project: uckelman/vbench
def magic_memit(ns, line='', repeat=1, timeout=None, run_in_place=True):
    """Measure memory usage of a Python statement

    Usage, in line mode:
      %memit [-ir<R>t<T>] statement

    Options:
    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 3

    -i: run the code in the current environment, without forking a new process.
    This is required on some MacOS versions of Accelerate if your line contains
    a call to `np.dot`.

    -t<T>: timeout after <T> seconds. Unused if `-i` is active. Default: None

    Examples
    --------
    ::

      In [1]: import numpy as np

      In [2]: %memit np.zeros(1e7)
      maximum of 3: 76.402344 MB per loop

      In [3]: %memit np.ones(1e6)
      maximum of 3: 7.820312 MB per loop

      In [4]: %memit -r 10 np.empty(1e8)
      maximum of 10: 0.101562 MB per loop

      In [5]: memit -t 3 while True: pass;
      Subprocess timed out.
      Subprocess timed out.
      Subprocess timed out.
      ERROR: all subprocesses exited unsuccessfully. Try again with the `-i`
      option.
      maximum of 3: -inf MB per loop

    """
    if repeat < 1:
        repeat = 1
    if timeout <= 0:
        timeout = None

    # Don't depend on multiprocessing:
    try:
        import multiprocessing as pr
        from multiprocessing.queues import SimpleQueue
        q = SimpleQueue()
    except ImportError:
        class ListWithPut(list):
            "Just a list where the `append` method is aliased to `put`."
            def put(self, x):
                self.append(x)
        q = ListWithPut()
        print ('WARNING: cannot import module `multiprocessing`. Forcing the '
               '`-i` option.')
        run_in_place = True

    def _get_usage(q, stmt, setup='pass', ns={}):
        from memory_profiler import memory_usage as _mu
        try:
            exec setup in ns
            _mu0 = _mu()[0]
            exec stmt in ns
            _mu1 = _mu()[0]
            q.put(_mu1 - _mu0)
        except Exception as e:
            q.put(float('-inf'))
            raise e

    if run_in_place:
        for _ in xrange(repeat):
            _get_usage(q, line, ns=ns)
    else:
        # run in consecutive subprocesses
        at_least_one_worked = False
        for _ in xrange(repeat):
            p = pr.Process(target=_get_usage, args=(q, line, 'pass', ns))
            p.start()
            p.join(timeout=timeout)
            if p.exitcode == 0:
                at_least_one_worked = True
            else:
                p.terminate()
                if p.exitcode is None:
                    print 'Subprocess timed out.'
                else:
                    print 'Subprocess exited with code %d.' % p.exitcode
                q.put(float('-inf'))

        if not at_least_one_worked:
            raise RuntimeError('ERROR: all subprocesses exited unsuccessfully.'
                               ' Try again with the `-i` option.')

    usages = [q.get() for _ in xrange(repeat)]
    usage = max(usages)
    return usage
Code example #23
class DataLoaderIter(object):
    "Iterates once over the DataLoader's dataset, as specified by the sampler"

    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = SimpleQueue()
            self.data_queue = SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                multiprocessing.Process(target=_worker_loop,
                                        args=(self.dataset, self.index_queue,
                                              self.data_queue,
                                              self.collate_fn))
                for _ in range(self.num_workers)
            ]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            if self.pin_memory:
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(target=_pin_memory_loop,
                                                   args=(in_data,
                                                         self.data_queue,
                                                         self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            if hasattr(self.dataset, 'build'):
                # Run the build method for the dataset
                self.dataset.build()

    def __len__(self):
        return len(self.batch_sampler)

    def __next__(self):
        if self.num_workers == 0:  # same-process loading
            indices = next(self.sample_iter)  # may raise StopIteration
            batch = self.collate_fn([self.dataset[i] for i in indices])
            if self.pin_memory:
                batch = pin_memory_batch(batch)
            return batch

        # check if the next sample has already been generated
        if self.rcvd_idx in self.reorder_dict:
            batch = self.reorder_dict.pop(self.rcvd_idx)
            return self._process_next_batch(batch)

        if self.batches_outstanding == 0:
            self._shutdown_workers()
            raise StopIteration

        while True:
            assert (not self.shutdown and self.batches_outstanding > 0)
            idx, batch = self.data_queue.get()
            self.batches_outstanding -= 1
            if idx != self.rcvd_idx:
                # store out-of-order samples
                self.reorder_dict[idx] = batch
                continue
            return self._process_next_batch(batch)

    next = __next__  # Python 2 compatibility

    def __iter__(self):
        return self

    def _put_indices(self):
        assert self.batches_outstanding < 2 * self.num_workers
        indices = next(self.sample_iter, None)
        if indices is None:
            return
        self.index_queue.put((self.send_idx, indices))
        self.batches_outstanding += 1
        self.send_idx += 1

    def _process_next_batch(self, batch):
        self.rcvd_idx += 1
        self._put_indices()
        if isinstance(batch, ExceptionWrapper):
            raise batch.exc_type(batch.exc_msg)
        return batch

    def __getstate__(self):
        # TODO: add limited pickling support for sharing an iterator
        # across multiple threads for HOGWILD.
        # Probably the best way to do this is by moving the sample pushing
        # to a separate thread and then just sharing the data queue
        # but signalling the end is tricky without a non-blocking API
        raise NotImplementedError("DataLoaderIterator cannot be pickled")

    def _shutdown_workers(self):
        if not self.shutdown:
            self.shutdown = True
            self.done_event.set()
            for _ in self.workers:
                self.index_queue.put(None)

    def __del__(self):
        if self.num_workers > 0:
            self._shutdown_workers()
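
The iterator above is normally produced by iterating a loader rather than constructed by hand; a sketch, where loader is assumed to be a configured PyTorch-style DataLoader and train_step a hypothetical callback.

it = DataLoaderIter(loader)   # loader: a configured DataLoader (assumed)
for batch in it:              # batches arrive in sampler order
    train_step(batch)         # hypothetical per-batch work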
Code example #24
File: arbiter.py Project: wong2/larus
class Arbiter(object):

    SIG_NAMES = dict(
        (getattr(signal, 'SIG%s' % name), name.lower())
            for name in 'TTIN TTOU TERM USR2'.split()
    )
    SIGNALS = SIG_NAMES.keys()

    def __init__(self, app, config):
        self.app = app
        self.config = config
        self.workers = []
        self.setup()

    def setup(self):
        self.pid = os.getpid()
        self.worker_nums = self.config['workers']
        self.worker_class = SyncWorker
        self.queue = SimpleQueue()
        self.setup_logger()
        self.setup_signals()

        addresses = self.config['binds']
        self.sockets = create_sockets(addresses, self.logger)

        addresses_str = ', '.join(map(format_addr_str, addresses))
        self.logger.info('Arbiter booted')
        self.logger.info('Listening on: %s (%s)', addresses_str, self.pid)
        self.logger.info('Using worker: %s', self.worker_class)

    def setup_logger(self):
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)-15s [%(process)d] [%(levelname)s] %(message)s',
                            handlers=[logging.StreamHandler()])
        self.logger = logging.getLogger(__name__)

    def setup_signals(self):
        for sig in self.SIGNALS:
            signal.signal(sig, self.handle_signal)

    def handle_signal(self, signum, frame):
        self.queue.put(signum)

    def run(self):
        self.spawn_workers()
        while True:
            try:
                signum = self.queue.get() # blocking
                signame = self.SIG_NAMES.get(signum)
                handler = getattr(self, 'handle_%s' % signame, None)
                if not handler:
                    self.logger.error('No handler for signal: %s', signame)
                    continue

                self.logger.info('Handling signal: %s', signame)
                handler()
            except KeyboardInterrupt:
                self.stop()

    def stop(self):
        self.logger.info('Stopping')
        for worker in self.workers:
            self.kill_worker(worker)
        for sock in self.sockets:
            sock.close()
        sys.exit(0)

    def spawn_worker(self):
        args = (self.app, self.sockets, self.logger, self.config)
        return Process(target=self.worker_class.create, args=args)

    def spawn_workers(self):
        diff = self.worker_nums - len(self.workers)
        for n in range(diff):
            worker = self.spawn_worker()
            self.workers.append(worker)
            worker.start()
            self.logger.info('Booting worker: %s', worker.pid)

    def kill_worker(self, worker):
        self.logger.info('Killing worker: %s' % worker.pid)
        worker.terminate()
        worker.join()

    def handle_ttin(self):
        self.worker_nums += 1
        self.spawn_workers()

    def handle_ttou(self):
        if self.worker_nums <= 1:
            return

        worker = self.workers.pop(0)
        self.kill_worker(worker)
        self.worker_nums -= 1
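
Since every signal handler merely enqueues the signal number, the arbiter can be resized from outside; a sketch of poking it from another process (arbiter_pid is hypothetical).

import os
import signal

os.kill(arbiter_pid, signal.SIGTTIN)  # handle_ttin: spawn one more worker
os.kill(arbiter_pid, signal.SIGTTOU)  # handle_ttou: retire one worker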
Code example #25
class Table(Process):
    def __init__(self, patterns):
        super(Table, self).__init__()

        self.patterns = patterns
        self.input_interface = Queue()
        self.output_interfaces = {'': Queue()}

    def stop(self):
        self.input_interface.put(None)
        self.join()

    def run(self):
        while True:
            try:
                data = self.input_interface.get()

                if data is None:
                    return

                operation, items = data

                if operation == 'add_entry':
                    index, entry = items
                    self.patterns.add_entry(index, entry)
                elif operation == 'del_entry':
                    index = items
                    self.patterns.del_entry(index)
                elif operation == 'query_entry':
                    index = items
                    entry = self.patterns.query_entry(index)
                    self.output_interfaces[''].put(entry)
                elif operation == 'write':
                    index, pattern = items
                    self.patterns[index] = pattern
                elif operation == 'read':
                    index, instruction_id = items
                    pattern = self.patterns[index]
                    self.output_interfaces[instruction_id].put(pattern)
                elif operation == 'lookup':
                    values, instruction_id = items
                    value = -1
                    if isinstance(self.patterns, MatchPatterns):
                        for i in range(0, len(self.patterns)):
                            pattern_list = self.patterns[i].values()

                            if len(pattern_list) != len(values):
                                raise RuntimeError()

                            if all(map((lambda (value, mask), source: value.value == (source.value & mask)),
                                       pattern_list, values)):
                                value = i
                                break
                    elif isinstance(self.patterns, SimplePatterns):
                        for i in range(0, len(self.patterns)):
                            pattern_list = self.patterns[i].values()

                            if all(map((lambda value, source: value.value == source.value),
                                       pattern_list, values)):
                                value = i
                                break
                    else:
                        raise RuntimeError()
                    self.output_interfaces[instruction_id].put(value)
                else:
                    raise RuntimeError()
            except KeyboardInterrupt:
                break
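
A usage sketch for the Table process; the operation tuples mirror the run() dispatch above, and patterns and entry are hypothetical stand-ins for the pattern store and an entry value.

table = Table(patterns)       # patterns: SimplePatterns/MatchPatterns (assumed)
table.start()

table.input_interface.put(('add_entry', (0, entry)))   # fire-and-forget
table.input_interface.put(('query_entry', 0))          # request ...
print(table.output_interfaces[''].get())               # ... and read the reply

table.stop()                  # sends the None sentinel and joins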
Code example #26
class PrimitiveProcess(Process):
    def __init__(self, instruction):
        super(PrimitiveProcess, self).__init__()
        # self.daemon = True

        self._instruction = instruction
        self.input_interface = Queue()
        self.output_interfaces = {}

        self._run = None
        if isinstance(self._instruction, I.ID):
            self._run = execute_ID
        elif isinstance(self._instruction, I.DRP):
            self._run = partial(execute_DRP,
                                reason=self._instruction.reason)
        elif isinstance(self._instruction, I.CTR):
            self._run = partial(execute_CTR,
                                reason=self._instruction.reason)
        elif isinstance(self._instruction, I.ADD):
            self._run = partial(execute_ADD,
                                field=self._instruction.field,
                                size=self._instruction.size)
        elif isinstance(self._instruction, I.RMV):
            self._run = partial(execute_RMV,
                                field=self._instruction.field)
        elif isinstance(self._instruction, I.LD):
            self._run = partial(execute_LD,
                                destination=self._instruction.destination,
                                source=self._instruction.source)
        elif isinstance(self._instruction, I.ST):
            self._run = partial(execute_ST,
                                location=self._instruction.location,
                                source=self._instruction.source)
        elif isinstance(self._instruction, I.OP):
            self._run = partial(execute_OP,
                                destination=self._instruction.destination,
                                left_source=self._instruction.left_source,
                                operator=self._instruction.operator,
                                right_source=self._instruction.right_source)
        elif isinstance(self._instruction, I.PUSH):
            self._run = partial(execute_PUSH,
                                location=self._instruction.location)
        elif isinstance(self._instruction, I.POP):
            self._run = partial(execute_POP,
                                location=self._instruction.location)
        elif isinstance(self._instruction, I.BR):
            self._run = partial(execute_BR,
                                left_source=self._instruction.left_source,
                                operator=self._instruction.operator,
                                right_source=self._instruction.right_source,
                                label=self._instruction.label)
        elif isinstance(self._instruction, I.JMP):
            self._run = partial(execute_JMP,
                                label=self._instruction.label)
        elif isinstance(self._instruction, I.LBL):
            self._run = execute_LBL
        elif isinstance(self._instruction, I.LDt):
            self.table_interface = TableInterface()
            self._run = partial(execute_LDt,
                                table_interface=self.table_interface,
                                instruction=self._instruction,
                                destinations=self._instruction.destinations,
                                index=self._instruction.index)
        elif isinstance(self._instruction, I.STt):
            self.table_interface = TableInterface()
            self._run = partial(execute_STt,
                                table_interface=self.table_interface,
                                instruction=self._instruction,
                                index=self._instruction.index,
                                sources=self._instruction.sources)
        elif isinstance(self._instruction, I.INCt):
            self.table_interface = TableInterface()
            self._run = partial(execute_INCt,
                                table_interface=self.table_interface,
                                instruction=self._instruction,
                                index=self._instruction.index)
        elif isinstance(self._instruction, I.LKt):
            self.table_interface = TableInterface()
            self._run = partial(execute_LKt,
                                table_interface=self.table_interface,
                                instruction=self._instruction,
                                index=self._instruction.index,
                                sources=self._instruction.sources)
        elif isinstance(self._instruction, I.CRC):
            self._run = partial(execute_CRC,
                                destination=self._instruction.destination,
                                sources=self._instruction.sources)
        elif isinstance(self._instruction, I.HSH):
            self._run = partial(execute_HSH,
                                destination=self._instruction.destination,
                                sources=self._instruction.sources)
        elif isinstance(self._instruction, I.HLT):
            self._run = execute_HLT
        else:
            raise RuntimeError()

    def stop(self):
        self.input_interface.put(None)
        self.join()

    def run(self):
        while True:
            try:
                state = self.input_interface.get()

                # print 'primitive_process'

                if state is None:
                    return

                state = self._run(state)

                self.output_interfaces[state.label].put(state)
            except KeyboardInterrupt:
                break
Code example #27
class Pipeline:
    def __init__(self, instructions, tables):
        self.instructions = instructions
        self.tables = tables

        self._input_interface = None
        self._output_interface = Queue()
        self._instructions = {}
        self._is_setup = False
        self._is_start = False

        self._setup()

    def _setup(self):
        flow_graph = cfg.generate(self.instructions)

        ''' Setting up connections within basic blocks '''
        for label, node in flow_graph.iteritems():
            if label == syntax.Label('$entry') or label == syntax.Label('$exit'):
                continue
            next_instruction = None
            for instruction in node.basic_block[::-1]:
                if (isinstance(instruction, I.ATM) or isinstance(instruction, I.SEQ) or
                        isinstance(instruction, I.CNC)):
                    self._instructions[instruction] = GroupProcess(instruction, self.tables)
                else:
                    self._instructions[instruction] = PrimitiveProcess(instruction)

                if next_instruction:
                    self._instructions[instruction].output_interfaces[syntax.Label('')] = \
                        self._instructions[next_instruction].input_interface

                next_instruction = instruction

        ''' Setting up connections across basic blocks '''
        for label, node in flow_graph.iteritems():
            last_instruction = node.basic_block[-1]
            for successor_label in node.successors:
                first_instruction = flow_graph[successor_label].basic_block[0]

                if label == syntax.Label('$entry'):
                    # if successor_label == Label('$exit'):
                    # self.input_interface = self.output_interface
                    # else:
                    self._input_interface = self._instructions[first_instruction].input_interface
                    # Note: this should always be no more than one
                else:
                    if successor_label == syntax.Label('$exit'):
                        self._instructions[last_instruction].output_interfaces[
                            syntax.Label('')] = self._output_interface
                    else:
                        if isinstance(last_instruction, I.BR) or isinstance(last_instruction, I.JMP):
                            if last_instruction.label == successor_label:
                                self._instructions[last_instruction].output_interfaces[last_instruction.label] = \
                                    self._instructions[first_instruction].input_interface
                            else:
                                self._instructions[last_instruction].output_interfaces[syntax.Label('')] = \
                                    self._instructions[first_instruction].input_interface
                        else:
                            self._instructions[last_instruction].output_interfaces[syntax.Label('')] = \
                                self._instructions[first_instruction].input_interface

        ''' Connect tables with instructions '''
        for instruction in self._instructions:
            if isinstance(instruction, I.LDt):
                table = self.tables[instruction.table_id]
                self._instructions[instruction].table_interface.output_interface = table.input_interface
                table.output_interfaces[id(instruction)] = \
                    self._instructions[instruction].table_interface.input_interface
            elif isinstance(instruction, I.STt):
                table = self.tables[instruction.table_id]
                self._instructions[instruction].table_interface.output_interface = table.input_interface
                table.output_interfaces[id(instruction)] = \
                    self._instructions[instruction].table_interface.input_interface
            elif isinstance(instruction, I.INCt):
                table = self.tables[instruction.table_id]
                self._instructions[instruction].table_interface.output_interface = table.input_interface
                table.output_interfaces[id(instruction)] = \
                    self._instructions[instruction].table_interface.input_interface
            elif isinstance(instruction, I.LKt):
                table = self.tables[instruction.table_id]
                self._instructions[instruction].table_interface.output_interface = table.input_interface
                table.output_interfaces[id(instruction)] = \
                    self._instructions[instruction].table_interface.input_interface

        self._is_setup = True

    def start(self):
        if not self._is_setup:
            raise RuntimeError()

        ''' Start the instructions '''
        for instruction in self._instructions:
            self._instructions[instruction].start()

        self._is_start = True

    def stop(self):
        if not self._is_start:
            raise RuntimeError()

        ''' Stop the instructions '''
        for instruction in self._instructions:
            self._instructions[instruction].stop()

        self._is_start = False

    def put(self, state):
        if not self._is_start:
            raise RuntimeError()

        if not state.header:
            pass

        self._input_interface.put(state)

    def get(self):
        if not self._is_start:
            raise RuntimeError()

        return self._output_interface.get()
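
Putting the pieces together, a sketch of driving a Pipeline end to end; instructions, tables, and state are assumed to come from the surrounding interpreter.

pipeline = Pipeline(instructions, tables)  # wires processes along the CFG
pipeline.start()

pipeline.put(state)      # feed one packet-processing state in ...
result = pipeline.get()  # ... and block until it emerges at $exit

pipeline.stop()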
Code example #28
    predictions = tf.argmax(logits, 1)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        'checkpoints/inception_v4.ckpt',
        slim.get_model_variables('InceptionV4'))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init_fn(sess)
    img_cnt = 0
    img_tlt = lines.size

    while img_cnt < img_tlt:
        processed_images, label = img_q.get()
        if fetch_test:
            img_cnt += 1
            sys.stdout.write('\r{:07d}/{:07d}'.format(img_cnt, img_tlt))
            sys.stdout.flush()
            continue
        pred, prob = sess.run([predictions, probabilities],
                              feed_dict={eval_inputs: processed_images})
        prob = prob[0, 0:]
        sorted_inds = [
            i[0] for i in sorted(enumerate(-prob), key=lambda x: x[1])
        ]

        top1 = sorted_inds[0]
        top5 = sorted_inds[0:5]
        if label == top1:
Code example #29
    settings_dict[CamPrm.framerate_free] = 80
    cmd_queue = SimpleQueue()
    img_queue = SimpleQueue()
    status_queue = SimpleQueue()

    vid_writer_process = VideoProcess(img_queue, cmd_queue, status_queue)
    cmd_queue.put((Command.new_settings, settings_dict))

    #Run on current process
    vid_writer_process.start()
    #while 1:
    #    if not status_queue.empty():
    #        cmd,data = status_queue.get()
    #        print cmd
    #        if cmd == Command.camera_connected:
    #            break

    cmd_queue.put((Command.record, ('test.avi', bounds)))
    time_start = time.time()
    timeout = 20

    while time.time() < time_start + timeout:
        if not img_queue.empty():
            img = img_queue.get()
        if not status_queue.empty():
            status, data = status_queue.get()
    cmd_queue.put((Command.terminate, None))
    time.sleep(3)
    vid_writer_process.terminate()
Code example #30
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            if not error_queue.empty():
                exit_event.set()
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        error_queue.put((RuntimeError, RuntimeError("Error: Items remaining in the task queue"), None))

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Code example #31
def run_clients(options, workingDir, db_table_set):
    # Spawn one client for each db.table, up to options.clients at a time
    exit_event = multiprocessing.Event()
    processes = []
    if six.PY3:
        ctx = multiprocessing.get_context(multiprocessing.get_start_method())
        error_queue = SimpleQueue(ctx=ctx)
    else:
        error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)
    hook_counter = multiprocessing.Value(ctypes.c_longlong, 0)

    signal.signal(signal.SIGINT,
                  lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = []

    try:
        progress_info = []
        arg_lists = []
        for db, table in db_table_set:

            tableSize = int(options.retryQuery(
                "count",
                query.db(db).table(table).info()['doc_count_estimates'].sum()))

            progress_info.append(
                (multiprocessing.Value(ctypes.c_longlong, 0),
                 multiprocessing.Value(ctypes.c_longlong, tableSize)))
            arg_lists.append((
                db,
                table,
                workingDir,
                options,
                error_queue,
                progress_info[-1],
                sindex_counter,
                hook_counter,
                exit_event,
            ))

        # Wait for all tables to finish
        while processes or arg_lists:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set()  # Stop immediately if an error occurs
                errors.append(error_queue.get())

            processes = [
                process for process in processes if process.is_alive()
            ]

            if len(processes) < options.clients and len(arg_lists) > 0:
                new_process = multiprocessing.Process(target=export_table,
                                                      args=arg_lists.pop(0))
                new_process.start()
                processes.append(new_process)

            update_progress(progress_info, options)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set() and not options.quiet:
            utils_common.print_progress(1.0, indent=4)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        if not options.quiet:
            print("\n    %s exported from %s, with %s, and %s" % (
                plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
                plural(len(db_table_set), "table", "tables"),
                plural(sindex_counter.value, "secondary index", "secondary indexes"),
                plural(hook_counter.value, "hook function", "hook functions")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options.debug:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Code example #32
class CameraSettingsTestCase(BaseQWidgetTestCase):

    def setUp(self):

        super(CameraSettingsTestCase,self).setUp()
        self.test_ard_cmd_queue = SimpleQueue()
        self.test_img_cmd_queue = SimpleQueue()
        self.mock_cfg = MockCFG()
        self.mock_nemacquire = MockNemacquire()
        self.camera_settings_widget = CameraSettings(
                                self.mock_nemacquire,
                                self.test_ard_cmd_queue,
                                self.test_img_cmd_queue,
                                self.mock_cfg,
                                250)


    def tearDown(self):
        super(CameraSettingsTestCase,self).tearDown()

    def test_reset(self):
        # Check that the reset action restores camera settings to the cfg values.
        import copy  # local import keeps the excerpt self-contained
        self.camera_settings_widget.initialize_fields()
        # Snapshot the cfg; keeping only a reference would compare the object
        # with itself, so the assertion could never fail.
        self.mock_cfg_old = copy.deepcopy(self.mock_cfg)
        self.camera_settings_widget.reset_fields()
        assert is_variable_dict_equivalent(self.mock_cfg_old, self.mock_cfg)

    def test_save(self):
        #test all values are set correctly after a save

        #Value Change
        self.camera_settings_widget.save()
        cmd, data = self.test_img_cmd_queue.get()
        results_dict = self.camera_diff_physical_values(data)
        self.camera_settings_widget.verifySettings(results_dict)
        assert is_dict_equivalent(results_dict,
                self.camera_settings_widget.settings_dict)
        
        #Type change - reset to old value if type conversion fails
        self.camera_settings_widget.save()
        cmd, data = self.test_img_cmd_queue.get()
        results_dict = self.camera_diff_physical_values(data,
                            diffType = "type change")
        self.camera_settings_widget.verifySettings(results_dict)
        assert is_dict_equivalent(results_dict,
                self.camera_settings_widget.settings_dict)

        #Missing value - keep old values if there is no corresponding key

    def test_save_error(self):
        # Error during the saving process; unclear how to exercise it, so this
        # is left as a stub.
        return True


    def camera_diff_physical_values(self, cur_settings_dict,
                                    diffType="numerical change"):
        # Send back different values than what came in.
        result_dict = {}

        if diffType == "type change":
            # Map every setting to None so type conversion fails regardless
            # of whether the original value was boolean or numeric.
            for key in cur_settings_dict:
                result_dict[key] = None

        if diffType == "numerical change":
            for key, value in cur_settings_dict.items():
                if isinstance(value, bool):
                    result_dict[key] = not value
                else:
                    result_dict[key] = value + 1

        if diffType == "missing values":
            pass

        return result_dict

    def check_type_of_settings(self):
        # Check the type of every setting in both the widget and the cfg.
        widget = self.camera_settings_widget
        return all([
            isinstance(widget.settings_dict[CamPrm.triggered], int),
            isinstance(widget.settings_dict[CamPrm.framerate_div], int),
            isinstance(widget.settings_dict[CamPrm.exposure], int),
            isinstance(widget.settings_dict[CamPrm.white_balance], bool),
            isinstance(widget.settings_dict[CamPrm.framerate_free], int),
            isinstance(widget.cfg.triggered, int),
            isinstance(widget.cfg.framerate_div, int),
            isinstance(widget.cfg.exposure_time, int),
            isinstance(widget.cfg.white_balance, bool),
            isinstance(widget.cfg.framerate_free, int),
        ])
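
One caveat for tests like test_save above: SimpleQueue.get() blocks forever and takes no timeout argument, so a save() that never enqueues a command hangs the whole test run. A small polling helper avoids that; this sketch is a suggestion, not part of the original suite:

import time

def get_with_deadline(queue, deadline=5.0, poll=0.05):
    # Poll empty() because SimpleQueue.get() has no timeout parameter.
    end = time.time() + deadline
    while time.time() < end:
        if not queue.empty():
            return queue.get()
        time.sleep(poll)
    raise AssertionError("queue stayed empty for %.1fs" % deadline)
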
Code example #33
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    stream_semaphore = multiprocessing.BoundedSemaphore(options["clients"])

    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))

    try:
        progress_info = []

        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),
                                  multiprocessing.Value(ctypes.c_longlong, 0)))
            processes.append(multiprocessing.Process(target=export_table,
                                                     args=(options["host"],
                                                           options["port"],
                                                           options["auth_key"],
                                                           db, table,
                                                           options["directory_partial"],
                                                           options["fields"],
                                                           options["format"],
                                                           error_queue,
                                                           progress_info[-1],
                                                           stream_semaphore,
                                                           exit_event)))
            processes[-1].start()

        # Wait for all tables to finish
        while len(processes) > 0:
            time.sleep(0.1)
            if not error_queue.empty():
                exit_event.set() # Stop immediately if an error occurs
            processes = [process for process in processes if process.is_alive()]
            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        print("")
        print("%s exported from %s" % (plural(sum([info[0].value for info in progress_info]), "row"),
                                       plural(len(db_table_set), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Code example #34
File: resolvers.py Project: stkonst/PolicyParser
    def _handle_ASes(self):
        """Spawns several processes (based on the available CPUs) to handle the
        AS resolving and creates the necessary objects based on the results.
        """
        # Gather all the ASNs seen through filter and recursive resolving.
        all_ASNs = list((self.recursed_ASes | self.AS_list) - self.black_list)
        all_ASNs_count = len(all_ASNs)
        if all_ASNs_count < 1:
            return

        # We will devote all but one core to resolving since the main process
        # will handle the objects' creation.
        number_of_resolvers = mp.cpu_count() - 1
        if number_of_resolvers < 1:
            number_of_resolvers = 1

        # The list of ASNs is going to be distributed almost equally to the
        # available resolvers.
        if all_ASNs_count >= number_of_resolvers:
            slice_length = int(math.ceil(all_ASNs_count / float(number_of_resolvers)))
        else:
            number_of_resolvers = all_ASNs_count
            slice_length = 1

        result_q = SimpleQueue()  # NOTE: Only works with this queue.
        processes = []
        slice_start = 0
        for i in xrange(number_of_resolvers):
            ASN_batch = all_ASNs[slice_start:slice_start + slice_length]
            # Process.start() returns None, so append the Process object
            # itself rather than the result of start().
            worker = mp.Process(target=_subprocess_AS_resolving,
                                args=(ASN_batch, result_q))
            worker.start()
            processes.append(worker)
            slice_start += slice_length

        # PROGRESS START
        # Show progress while running.
        # Can be safely commented out until PROGRESS END.
        aps_count = 0
        aps = 0
        time_start = time.time()
        # PROGRESS END

        done = 0
        while done < all_ASNs_count:
            try:
                asn, routes = result_q.get()
            except Empty:
                # This should never be reached with this queue but left here
                # just in case.
                time.sleep(0.2)
                continue

            # If the AS has routes create the appropriate ASN object and add it
            # to the data pool.
            if routes is not None and (routes['ipv4'] or routes['ipv6']):
                ASN_object = rpsl.ASObject(asn)
                for prefix in routes['ipv4']:
                    route_object = rpsl.RouteObject(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route_object)
                for prefix in routes['ipv6']:
                    route6_object = rpsl.Route6Object(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route6_object)
                self.AS_dir.append_ASN_obj(ASN_object)
            done += 1

            # PROGRESS START
            # Show progress while running.
            # Can be safely commented out until PROGRESS END.
            aps_count += 1
            time_diff = time.time() - time_start
            if time_diff >= 1:
                aps = aps_count / time_diff
                aps_count = 0
                time_start = time.time()
            sys.stdout.write("{} of {} ASes | {:.0f} ASes/s          \r"
                             .format(done, all_ASNs_count, aps))
            sys.stdout.flush()
        print("")  # Move past the \r-refreshed progress line.
        # PROGRESS END
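
The worker, _subprocess_AS_resolving, is outside this excerpt, but the call site and the consuming loop pin down its contract: exactly one (asn, routes) pair must be put per ASN, since the parent counts replies up to all_ASNs_count, with routes either None or a dict holding 'ipv4' and 'ipv6' prefix lists. A hedged sketch of that contract, where resolve_asn_routes() is a placeholder for the real lookup:

def _subprocess_AS_resolving(ASN_batch, result_q):
    # Sketch only: resolve_asn_routes() stands in for the actual resolver.
    # The parent counts replies, so exactly one tuple must be put per ASN,
    # even when resolution fails.
    for asn in ASN_batch:
        try:
            routes = resolve_asn_routes(asn)  # {'ipv4': [...], 'ipv6': [...]}
        except Exception:
            routes = None
        result_q.put((asn, routes))
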
Code example #35
File: utils.py Project: pombredanne/jsonextended
        def memit(self, line='', setup='pass'):
            """Measure memory usage of a Python statement

            Usage, in line mode:
              %memit [-ir<R>t<T>] statement

            Options:
            -r<R>: repeat the loop iteration <R> times and take the best result.
            Default: 3

            -i: run the code in the current environment, without forking a new
            process. This is required on some MacOS versions of Accelerate if your
            line contains a call to `np.dot`.

            -t<T>: timeout after <T> seconds. Unused if `-i` is active.
            Default: None

            Examples
            --------
            ::

              In [1]: import numpy as np

              In [2]: %memit np.zeros(1e7)
              maximum of 3: 76.402344 MB per loop

              In [3]: %memit np.ones(1e6)
              maximum of 3: 7.820312 MB per loop

              In [4]: %memit -r 10 np.empty(1e8)
              maximum of 10: 0.101562 MB per loop

              In [5]: %memit -t 3 while True: pass;
              Subprocess timed out.
              Subprocess timed out.
              Subprocess timed out.
              ERROR: all subprocesses exited unsuccessfully. Try again with the
              `-i` option.
              maximum of 3: -inf MB per loop

            """
            opts, stmt = self.parse_options(line,
                                            'r:t:i',
                                            posix=False,
                                            strict=False)
            repeat = int(getattr(opts, 'r', 3))
            if repeat < 1:
                repeat = 1
            timeout = int(getattr(opts, 't', 0))
            if timeout <= 0:
                timeout = None
            run_in_place = hasattr(opts, 'i')

            # Don't depend on multiprocessing:
            try:
                import multiprocessing as pr
                from multiprocessing.queues import SimpleQueue
                q = SimpleQueue()
            except ImportError:

                class ListWithPut(list):
                    "Just a list where the `append` method is aliased to `put`."

                    def put(self, x):
                        self.append(x)

                q = ListWithPut()
                print(
                    'WARNING: cannot import module `multiprocessing`. Forcing '
                    'the `-i` option.')
                run_in_place = True

            ns = self.shell.user_ns

            def _get_usage(q, stmt, setup='pass', ns={}):
                try:
                    exec(setup) in ns
                    _mu0 = _mu()[0]
                    exec(stmt) in ns
                    _mu1 = _mu()[0]
                    q.put(_mu1 - _mu0)
                except Exception as e:
                    q.put(float('-inf'))
                    raise e

            if run_in_place:
                for _ in xrange(repeat):
                    _get_usage(q, stmt, ns=ns)
            else:
                # run in consecutive subprocesses
                at_least_one_worked = False
                for _ in xrange(repeat):
                    p = pr.Process(target=_get_usage,
                                   args=(q, stmt, 'pass', ns))
                    p.start()
                    p.join(timeout=timeout)
                    if p.exitcode == 0:
                        at_least_one_worked = True
                    else:
                        p.terminate()
                        if p.exitcode is None:
                            print('Subprocess timed out.')
                        else:
                            print('Subprocess exited with code %d.' %
                                  p.exitcode)
                        q.put(float('-inf'))

                if not at_least_one_worked:
                    print('ERROR: all subprocesses exited unsuccessfully. Try '
                          'again with the `-i` option.')

            usages = [q.get() for _ in xrange(repeat)]
            usage = max(usages)
            print("maximum of %d: %f MB per loop" % (repeat, usage))
Code example #36
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    stream_semaphore = multiprocessing.BoundedSemaphore(options["clients"])

    signal.signal(signal.SIGINT,
                  lambda a, b: abort_export(a, b, exit_event, interrupt_event))

    try:
        progress_info = []

        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),
                                  multiprocessing.Value(ctypes.c_longlong, 0)))
            processes.append(
                multiprocessing.Process(
                    target=export_table,
                    args=(options["host"], options["port"],
                          options["auth_key"], db, table,
                          options["directory_partial"], options["fields"],
                          options["format"], error_queue, progress_info[-1],
                          stream_semaphore, exit_event)))
            processes[-1].start()

        # Wait for all tables to finish
        while len(processes) > 0:
            time.sleep(0.1)
            if not error_queue.empty():
                exit_event.set()  # Stop immediately if an error occurs
            processes = [
                process for process in processes if process.is_alive()
            ]
            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        print("")
        print("%s exported from %s" %
              (plural(sum([info[0].value for info in progress_info]),
                      "row"), plural(len(db_table_set), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Code example #37
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(
        signal.SIGINT,
        lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue,
                                  client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(
                multiprocessing.Process(target=client_process,
                                        args=(options["host"], options["port"],
                                              options["auth_key"], task_queue,
                                              error_queue, rows_written,
                                              options["force"],
                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((
                multiprocessing.Value(ctypes.c_longlong,
                                      -1),  # Current lines/bytes processed
                multiprocessing.Value(ctypes.c_longlong,
                                      0)))  # Total lines/bytes to process
            reader_procs.append(
                multiprocessing.Process(target=table_reader,
                                        args=(options, file_info, task_queue,
                                              error_queue, progress_info[-1],
                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            if not error_queue.empty():
                exit_event.set()
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put("exit")

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [
                client for client in client_procs if client.is_alive()
            ]

        # If we were successful, make sure 100% progress is reported
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(
            rows_written.value, "row"), plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not task_queue.empty():
        error_queue.put(
            (RuntimeError,
             RuntimeError("Error: Items remaining in the task queue"), None))

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")