Example #1
    def __init__(self, key, task_group, randomize):
        self.key = key
        self.gen_worker = task_group['gen_worker']
        self.task_ids = task_group['task_ids']
        self.is_parallel = task_group['is_parallel']
        if self.is_parallel:
            self.randomize = randomize
            if self.randomize:
                random.shuffle(self.task_ids)
        else:
            self.randomize = False
        self.result_queue = SimpleQueue()
        self.task_queue = SimpleQueue()

        # Don't expose the queues' file descriptors over Popen to, say, a
        # tarantool instance running the tests.
        set_fd_cloexec(self.result_queue._reader.fileno())
        set_fd_cloexec(self.result_queue._writer.fileno())
        set_fd_cloexec(self.task_queue._reader.fileno())
        set_fd_cloexec(self.task_queue._writer.fileno())

        for task_id in self.task_ids:
            self.task_queue.put(task_id)
        self.worker_ids = set()
        self.done = False
        self.done_task_ids = set()
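The set_fd_cloexec helper used above is not shown in this snippet; as a minimal sketch of what such a helper likely does with the standard fcntl module (the exact implementation is an assumption):

import fcntl

def set_fd_cloexec(fd):
    # Set the close-on-exec flag so the descriptor is not inherited by
    # child processes started later via Popen/exec.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)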
Example #2
def _init_queues(n):
    global verbose_output
    verbose_output = SimpleQueue()
    global signals
    signals = SimpleQueue()
    global queues
    queues = [SimpleQueue() for _ in range(n)]
Example #3
    def start(self):
        '''Start streaming
        '''
        # signal handling.
        self.signaled = False
        # store the original signal handlers
        original_sigint = signal.getsignal(signal.SIGINT)
        original_sighup = signal.getsignal(signal.SIGHUP)
        original_sigterm = signal.getsignal(signal.SIGTERM)

        # set the new signal handlers
        signal.signal(signal.SIGINT, lambda s, f: self.set_signal())
        signal.signal(signal.SIGHUP, lambda s, f: self.set_signal())
        signal.signal(signal.SIGTERM, lambda s, f: self.set_signal())

        enable_port_sum = sum([2**i for i in self.ns_commands.keys()
                               ]) + 2**self.s_command[0]
        self.sendcmd("RPER", num=enable_port_sum)
        self.sendcmd("RDDR", num=0)

        self.s_buf = SimpleQueue()
        self.ns_buf = {0: SimpleQueue()}
        for key in self.ns_commands:
            self.ns_buf[key] = SimpleQueue()

        s_fname = self.s_fname if self.s_fname else "{:s}stream.csv".format(
            datetime.now().strftime("%Y%m%d%H%M%S"))
        ns_fname = self.ns_fname if self.ns_fname else "{:s}nostream.csv".format(
            datetime.now().strftime("%Y%m%d%H%M%S"))

        s_fwrite_proc = Process(target = \
            lambda: file_writer(s_fname, self.s_fheader, self.s_fstr, self.s_buf))
        ns_fwrite_proc = Process(target = \
            lambda: file_multi_writer(ns_fname, self.ns_fheader, self.ns_fstr, self.ns_buf))
        sort_proc = Process(target=self.sorter)
        ns_proc = Process(target=self.ns_cmd_sender)

        self.sendcmd("SNDT",
                     self.s_command[0],
                     str_block=makecmd("TPER", num=self.s_tper))
        self.sendcmd("SNDT", self.s_command[0], str_block=self.s_command[1])

        s_fwrite_proc.start()
        ns_fwrite_proc.start()
        sort_proc.start()
        ns_proc.start()

        sort_proc.join()
        ns_proc.join()
        s_fwrite_proc.join()
        ns_fwrite_proc.join()

        del self.s_buf
        del self.ns_buf

        # restore the original handlers
        signal.signal(signal.SIGINT, original_sigint)
        signal.signal(signal.SIGHUP, original_sighup)
        signal.signal(signal.SIGTERM, original_sigterm)
Example #4
    def _setup_queues(self):
        from multiprocessing.queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._ackqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv
        self._quick_get_ack = self._ackqueue._reader.recv
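This example (and Example #6 below) reaches into the queues' underlying Connection objects so hot paths can skip the SimpleQueue wrappers. A rough sketch of the idea; note that _reader and _writer are CPython implementation details, not public API:

from multiprocessing import SimpleQueue

q = SimpleQueue()
quick_put = q._writer.send      # private attribute: write end of the pipe
quick_get = q._reader.recv      # private attribute: read end of the pipe

quick_put({'job': 1})           # bypasses the queue's own put()/get() wrappers
print(quick_get())              # -> {'job': 1}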
Example #5
    def _setup_queues(self):
        """
        Set up the SimpleQueues used for communication.
        :return:
        """
        BasePool._setup_queues(self)
        self._get_data_queue = SimpleQueue()
        self._require_data_queue = SimpleQueue()
Example #6
    def _setup_queues(self):
        from multiprocessing.queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

        def _poll_result(timeout):
            if self._outqueue._reader.poll(timeout):
                return True, self._quick_get()
            return False, None
        self._poll_result = _poll_result
Example #7
    def setUp(self):

        super(CameraSettingsTestCase,self).setUp()
        self.test_ard_cmd_queue = SimpleQueue()
        self.test_img_cmd_queue = SimpleQueue()
        self.mock_cfg = MockCFG()
        self.mock_nemacquire = MockNemacquire()
        self.camera_settings_widget = CameraSettings(
                                self.mock_nemacquire,
                                self.test_ard_cmd_queue,
                                self.test_img_cmd_queue,
                                self.mock_cfg,
                                250)        
Example #8
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = SimpleQueue()
            self.data_queue = SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                multiprocessing.Process(target=_worker_loop,
                                        args=(self.dataset, self.index_queue,
                                              self.data_queue,
                                              self.collate_fn))
                for _ in range(self.num_workers)
            ]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            if self.pin_memory:
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(target=_pin_memory_loop,
                                                   args=(in_data,
                                                         self.data_queue,
                                                         self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            if hasattr(self.dataset, 'build'):
                # Run the build method for the dataset
                self.dataset.build()
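The _put_indices method that primes the prefetch loop is not part of this snippet; a rough sketch of what such a step typically does, modeled on the surrounding attributes (the details are assumptions, not the project's actual code):

    def _put_indices(self):
        # Pull the next batch of sample indices from the sampler and hand it
        # to the workers, tracking how many batches are currently in flight.
        indices = next(self.sample_iter, None)
        if indices is None:
            return
        self.index_queue.put((self.send_idx, indices))
        self.batches_outstanding += 1
        self.send_idx += 1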
Example #9
def multi_process_list_with_consumer(data, method, consumerObj, numProcessors,
                                     *args):
    if numProcessors > len(data):
        numProcessors = len(data)
    dataSplit = split_into_sublist(data, numProcessors)
    processes = [None] * numProcessors
    results = [None] * len(data)
    tempRes = SimpleQueue()
    for i in xrange(numProcessors):
        newArgs = (dataSplit[i], ) + args + (tempRes, )
        processes[i] = multiprocessing.Process(target=method, args=newArgs)
        processes[i].start()

    lastPercent = 0
    dataLen = len(data)
    startTime = time.time()
    get = tempRes.get
    for i in xrange(len(results)):
        consumerObj.process(get())
        percentDone = float(i) / dataLen
        if percentDone - lastPercent >= 0.1:
            timeTaken = time.time() - startTime
            timeRemain = short_format_time(
                (dataLen - (i + 1)) / ((i + 1) / timeTaken))
            timeTaken = short_format_time(timeTaken)
            print int(
                percentDone * 100
            ), "percent done | time elapsed:", timeTaken, "| time remaining:", timeRemain
            lastPercent = percentDone
    for p in processes:
        p.join()
    if lastPercent != 100:
        print "100 percent done"
    print "Finished parallel processing list of length", len(results)
    return consumerObj.results()
Example #10
def main(data):
    import sys
    from multiprocessing.queues import SimpleQueue
    url_list = []
    try:
        req = urllib2.Request(data[1])
        #res_data = urllib2.urlopen(req)
        kk = 1
    except:
        kk = 0
    if kk == 1:
        url_list.append(data[1])
        queue = SimpleQueue()
        message_type = "test"
        app = QApplication(sys.argv)
        crawler = Crawler(url_list, queue, message_type)
        crawler.crawler_start()

        sys.exit(app.exec_())
        print crawler.title
    else:
        file_name = data[1].replace("/", "_")
        file_object = open(file_name, 'w')
        file_object.write("/*/")
        file_object.close()
Example #11
def async_file_reading(fd, callback):
    """Helper which instantiate and run an AsynchronousFileReader."""
    queue = SimpleQueue()
    reader = AsynchronousFileReader(fd, queue)
    reader.start()
    consummer = Process(target=consume_queue, args=(queue, callback))
    consummer.start()
    return (reader, consummer)
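Neither AsynchronousFileReader nor consume_queue is shown here; as a rough guess at the consumer side, under the assumption of a None sentinel marking end-of-stream (names and convention are assumptions, not the project's actual code):

def consume_queue(queue, callback):
    # Hypothetical consumer: hand each queued line to the callback until
    # a None sentinel signals that the reader has finished.
    while True:
        line = queue.get()
        if line is None:
            break
        callback(line)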
Example #12
    def __init__(self, key, task_group, randomize):
        self.key = key
        self.gen_worker = task_group['gen_worker']
        self.task_ids = task_group['task_ids']
        self.is_parallel = task_group['is_parallel']
        if self.is_parallel:
            self.randomize = randomize
            if self.randomize:
                random.shuffle(self.task_ids)
        else:
            self.randomize = False
        self.result_queue = SimpleQueue()
        self.task_queue = SimpleQueue()
        for task_id in self.task_ids:
            self.task_queue.put(task_id)
        self.worker_ids = set()
        self.done = False
        self.done_task_ids = set()
Example #13
def test_simple_queue():
    q = SimpleQueue()
    input_ = [1, 2, 3, 4, 5, 6]
    from_iterable(consumers.to_simple_queue(q), input_)

    for i in input_:
        o = q.get()
        assert o == i

    assert q.empty()
Example #14
    def test_can_pickle_via_queue(self):
        """
        https://github.com/andresriancho/w3af/issues/8748
        """
        sq = SimpleQueue()
        u1 = URL('http://www.w3af.com/')
        sq.put(u1)
        u2 = sq.get()

        self.assertEqual(u1, u2)
Example #15
def edjust(ctx, input_folder):
    """Adjust pose of 3D object"""

    rgb, gray, scene = read_input(input_folder)
    integral_calculator = IntegralCalculator(gray,
                                             scene,
                                             normalized_gradient=True)
    model_queue = SimpleQueue()
    process = Process(target=run_optimization,
                      args=(scene.model, integral_calculator, model_queue))
    process.start()
    exit_code = run_gui(sys.argv[:1], rgb, scene, integral_calculator,
                        model_queue)
    process.terminate()
    ctx.exit(exit_code)
Example #16
    def __init__(self, max_workers=None):
        _check_system_limits()
        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = {}
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
Example #17
    def prepare_output_flow(self, flow):
#        output_queue = DataFlowQueue()
        if self.deffer_to_process:
            import multiprocessing
            from multiprocessing.queues import SimpleQueue
            output_queue = SimpleQueue()
            
            process = multiprocessing.Process(target=subprocess_output_adapter,
                                              args=(self.__prepare_output_flow(flow),output_queue))
            process.start()
            
            for itemm in iter(GeneratorAdapter(output_queue)):
                yield itemm
            
#            process.join()
        else:
            for itemm in self.__prepare_output_flow(flow):
                yield itemm
Example #18
def launch_graph_plot():
    q = SimpleQueue()
    Pyro4.config.HOST="10.1.1.2"
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    p = Process(target=_launch_daemon, args=(daemon, q,))
    p.start()
    graph_plot = GraphPlotPanel()
    while True:
        if not q.empty():
            item = q.get()
            if item[0] == 'time':
                print "got queue:", item
                graph_plot.set_time(item[1])
            elif item[0] == 'vertex_color':
                pass
        graph_plot.run()
        fpsClock.tick(60)
Example #19
def from_twitter_api(target, endpoint, config):
    """Consume tweets from a Streaming API endpoint."""
    endpoint_to_url = {
        'twitter://sample':
        'https://stream.twitter.com/1.1/statuses/sample.json',
        'twitter://filter':
        'https://stream.twitter.com/1.1/statuses/filter.json',
    }

    if endpoint == 'twitter://filter':
        filter_predicates = config.global_filter.predicates
        kwargs = {
            'follow': filter_predicates['follow'],
            'track': filter_predicates['track'],
            'locations': filter_predicates['locations'],
        }
    else:
        kwargs = {}

    # The communication point of the consumer and producer processes.
    queue = SimpleQueue()

    # Start the consumer first
    consumer = StreamConsumer(queue, target)
    consumer.start()

    # then the producer.
    producer = StreamProducer(twitter_credentials=dict(
        config.items('twitter')),
                              target=consumers.to_simple_queue(queue),
                              url=endpoint_to_url[endpoint],
                              **kwargs)

    producer.start()

    try:
        producer.join()
    finally:
        queue.put(StopIteration)
        consumer.join()
Example #20
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
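On the other side of these two queues sits a loop in each worker process. A simplified sketch of that loop, illustrative only and not the exact concurrent.futures internals (the attribute names on the call item are assumptions):

def _process_worker(call_queue, result_queue):
    # Pull call items off the bounded multiprocessing.Queue, run them, and
    # push (work_id, exception, result) back through the SimpleQueue.
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:          # shutdown sentinel from the executor
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
            result_queue.put((call_item.work_id, None, r))
        except BaseException as e:
            result_queue.put((call_item.work_id, e, None))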
Example #21
def multi_process_list(data, method, numProcessors, *args):
    if len(data) < 1:
        print "[WARNING]--->Received empty list for multi-processing!"
        return None
    if numProcessors > len(data):
        numProcessors = len(data)
    dataSplit = split_into_sublist(data, numProcessors)
    processes = [None] * numProcessors
    results = [None] * len(data)
    tempRes = SimpleQueue()
    gc.disable()
    for i in xrange(numProcessors):
        newArgs = (dataSplit[i], ) + args + (tempRes, )
        processes[i] = multiprocessing.Process(target=method, args=newArgs)
        processes[i].start()
    lastPercent = 0
    startTime = time.time()
    dataLen = len(data)
    get = tempRes.get
    for i in xrange(len(results)):
        results[i] = get()
        percentDone = float(i) / dataLen
        if percentDone - lastPercent >= 0.1:
            timeTaken = time.time() - startTime
            timeRemain = short_format_time(
                (dataLen - (i + 1)) / ((i + 1) / timeTaken))
            timeTaken = short_format_time(timeTaken)
            print int(
                percentDone * 100
            ), "percent done | time elapsed:", timeTaken, "| time remaining:", timeRemain
            lastPercent = percentDone
    for p in processes:
        p.join()
    if lastPercent != 100:
        print "100 percent done"
    gc.enable()
    print "Finished parallel processing list of length", len(results)
    return results
Example #22
    def __init__(self, data_structure, processes, scan_function, init_args,
                 _mp_init_function):
        """ Init the scanner.

        data_structure is a world.DataSet
        processes is the number of child processes to use
        scan_function is the function to use for scanning
        init_args are the arguments passed to the init function
        _mp_init_function is the function used to init the child processes
        """
        assert(isinstance(data_structure, world.DataSet))
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes,
                initializer=_mp_init_function,
                initargs=(init_args,))

        # TODO: make this automatic amount
        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None
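The "NOTE TO SELF" above is the key detail: Pool's initargs only accepts positional arguments, so the whole per-process configuration travels as a single dict. A hypothetical _mp_init_function that unpacks it in each child might look like this (the global name and extra keys are assumptions):

def _mp_init_function(init_args):
    # Runs once in every child process. initargs cannot carry kwargs,
    # so everything arrives bundled in one dict and is unpacked here.
    global queue
    queue = init_args['queue']
    # ...any other per-process settings travel in the same dict...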
Example #23
def main():
    global TCP_SEND_PORT
    global TCP_SEND_IP
    global TCP_RECEIVE_IP
    global TCP_RECEIVE_PORT
    global key_store
    global eventual_requests
    global eventual_write_lock
    global eventual_read_lock
    key_store = {}
    eventual_requests = {}
    eventual_write_lock = threading.Lock()
    eventual_read_lock = threading.Lock()
    signal.signal(signal.SIGINT, signal_handler)
    TCP_RECEIVE_IP = TCP_SEND_IP = socket.gethostbyname(socket.gethostname())
    TCP_SEND_PORT = int(sys.argv[1])
    TCP_RECEIVE_PORT = int(sys.argv[2])
    BUFFER_SIZE = 1024
    listener = threading.Thread(target=listening_thread, args=[BUFFER_SIZE])
    listener.daemon = True
    listener.start()
    message_queue = SimpleQueue()
    worker = threading.Thread(target=worker_thread, args=[message_queue])
    worker.daemon = True
    worker.start()

    while 1:
        command = str(
            raw_input(bcolors.HEADER + bcolors.UNDERLINE + "Enter Message:\n" +
                      bcolors.ENDC))
        messages = []
        if command.endswith('.txt'):
            messages = readFile(command)
        else:
            messages.append(command)
        message_queue.put(messages)
        print bcolors.OKBLUE +  'System time is ' + \
                str(datetime.datetime.now().strftime("%H:%M:%S:%f")) + bcolors.ENDC
Example #24
def export_table(host, port, auth_key, db, table, directory, fields, format,
                 error_queue, progress_info, stream_semaphore, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        rdb_call_wrapper(conn_fn, "count", get_table_size, db, table,
                         progress_info)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata,
                                      db, table, directory)

        with stream_semaphore:
            task_queue = SimpleQueue()
            writer = launch_writer(format, directory, db, table, fields,
                                   task_queue, error_queue)
            writer.start()

            rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db,
                             table, table_info["primary_key"], task_queue,
                             progress_info, exit_event)
    except (r.RqlError, r.RqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message),
                         traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(
                ("exit", "event"
                 ))  # Exit is triggered by sending a message with two objects
            writer.join()
        else:
            error_queue.put(
                (RuntimeError, RuntimeError("writer unexpectedly stopped"),
                 traceback.extract_tb(sys.exc_info()[2])))
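The ("exit", "event") message works as an in-band stop signal; a rough sketch of how the writer side might tell it apart from data (hypothetical, since the real launch_writer target is not shown):

def writer_loop(task_queue, write_rows):
    # Hypothetical writer: ordinary messages carry a batch of rows for
    # write_rows(); any two-object message is treated as the exit signal.
    while True:
        item = task_queue.get()
        if isinstance(item, tuple) and len(item) == 2:
            break
        write_rows(item)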
Example #25
    def __init__(self, func=None, iterable=None, args=None, kwargs=None, \
                 worker_type=None, worker_num=None, worker_remote=None, \
                 stride=None, buffer=None, ordered=True, skip=False, \
                 name=None):

        self.name = (name or 'numap_%s' % id(self))
        log.debug('%s %s starts initializing' % (self, self.name))
        if worker_type == 'process' and not HASMP:
            log.error('worker_type process requires multiprocessing')
            raise ImportError('worker_type process requires multiprocessing')
        self.worker_type = (worker_type or 'process')
        if worker_remote and not HASRP:
            log.error('worker_remote requires RPyC')
            raise ImportError('worker_remote requires RPyC')

        self._tasks = []
        self._tasks_tracked = {}
        self._started = Event()         # (if not raise TimeoutError on next)
        self._stopping = Event()        # (starting stopping procedure see stop)
        # pool options
        if worker_num is None:
            self.worker_num = stride or cpu_count()
        else:
            self.worker_num = worker_num
        self.worker_remote = (worker_remote or [])    # [('host', #workers)]
        self.stride = stride or \
                      self.worker_num + sum([i[1] for i in self.worker_remote])
        self.buffer = buffer            # defines the maximum number
        # of jobs which are in the input queue, pool and output queues
        # and next method

        # next method options
        self.ordered = ordered
        self.skip = skip

        # make pool input and output queues based on worker type.
        if self.worker_type == 'process':
            self._inqueue = SimpleQueue()
            self._outqueue = SimpleQueue()
            self._putin = self._inqueue._writer.send
            self._getout = self._outqueue._reader.recv
            self._getin = self._inqueue.get
            self._putout = self._outqueue.put
        elif self.worker_type == 'thread':
            self._inqueue = Queue()
            self._outqueue = Queue()
            self._putin = self._inqueue.put
            self._getout = self._outqueue.get
            self._getin = self._inqueue.get
            self._putout = self._outqueue.put

        # combine tasks into a weaved queue
        self._next_available = {}   # per-task boolean queue 
                                    # releases next to get a result
        self._next_skipped = {}     # per-task int, number of results
                                    # to skip (locked)
        self._task_next_lock = {}   # per-task lock around _next_skipped
        self._task_finished = {}    # a per-task is finished variable
        self._task_results = {}     # a per-task queue for results

        log.debug('%s finished initializing' % self)

        if bool(func) ^ bool(iterable):
            log.error('%s either both or none of func and iterable ' % self +
                      'have to be specified.')
            raise ValueError('%s either both or none of func and iterable ' % self +
                             'have to be specified.')
        elif bool(func) and bool(iterable):
            self.add_task(func, iterable, args, kwargs)
            self.start()
Example #26
    scafsFile = open(include, "rU")
    scafsToInclude = [line.rstrip() for line in scafsFile.readlines()]
    print >> sys.stderr, len(scafsToInclude), "scaffolds will be analysed."
    scafsFile.close()
else:
    scafsToInclude = None

##########################################################################################################

#counting stats that will let us keep track of how far we are
windowsQueued = 0
resultsReceived = 0
resultsWritten = 0
resultsHandled = 0
'''Create queues to hold the data. One will hold the line info to be passed to the analysis'''
windowQueue = SimpleQueue()
#one will hold the results (in the order they come)
resultQueue = SimpleQueue()
#one will hold the sorted results to be written
writeQueue = SimpleQueue()
'''Start worker Processes for analysis. The command should be tailored for the analysis wrapper function.
Of course these will only start doing anything after we put data into the line queue.
The function we call is actually a wrapper for other function(s): it reads from the line queue, passes the data to some analysis function(s), gets the results and sends them to the result queue'''
for x in range(threads):
    worker = Process(target=stats_wrapper,
                     args=(windowQueue, resultQueue, windType, genoFormat,
                           sampleData, minSites, stats, doPops, args.skipPairs,
                           args.indHet, args.addWindowID))
    worker.daemon = True
    worker.start()
    print >> sys.stderr, "started worker", x
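Results can come back from the workers out of order, which is why a separate writeQueue holds the sorted output. A bare-bones sketch of such a re-ordering step, assuming results are tagged with an index and a None sentinel ends the stream (names and conventions are assumptions, not this script's actual code):

def sorter(result_queue, write_queue):
    # Buffer out-of-order results by index and release them sequentially.
    expected = 0
    buffered = {}
    while True:
        item = result_queue.get()
        if item is None:
            break
        index, result = item
        buffered[index] = result
        while expected in buffered:
            write_queue.put(buffered.pop(expected))
            expected += 1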
Example #27
    scafLens = dict(scafLens)
else:
    scafs = headData[0]["contigs"]
    scafLens = headData[0]["contigLengths"]

##########################################################################################################

#counting stats that will let us keep track of how far we are
windowsQueued = 0
resultsReceived = 0
resultsWritten = 0
linesWritten = 0


'''Create queues to hold the data. One will hold the pod info to be passed to the parser'''
inQueue = SimpleQueue()
#one will hold the results (in the order they come)
outQueue = SimpleQueue()
#one will hold the sorted results to be written
writeQueue = SimpleQueue()


'''Start worker Processes for the parser. The command should be tailored for the analysis wrapper function.
Of course these will only start doing anything after we put data into the pod queue.
The function we call is actually a wrapper for other function(s):
it reads from the pod queue, passes each line to some analysis function(s), gets the results and sends them to the result queue'''
workerThreads = []
sys.stderr.write("\nStarting {} worker threads\n".format(args.threads))
for x in range(args.threads):
    workerThread = Process(target=parseAndMergeWrapper,args=(inQueue, outQueue, args.inFile, args.minQual, args.maxREFlen, args.field, gtFilters,
                                                             args.method, args.skipIndels, missing, args.excludeDuplicates, args.simplifyALT,
Example #28
    def __init__(self):
        self.pipe = SimpleQueue()
        self.message = None
Example #29
from model.dut_parameters import DutParameters
from multiprocessing import Array, Process
from multiprocessing.queues import SimpleQueue
from Tkinter import Button, Checkbutton, Entry, Frame, Label, LabelFrame
from Tkinter import IntVar, StringVar
from Tkinter import N, S, E, W, ACTIVE, CENTER, DISABLED, RIDGE
from helper import ni_usb_6800 as NI
from helper import source_meter as SM

import os
import time
import tkFileDialog
import tkMessageBox


log_queue = SimpleQueue()


class GuiMainWindow(Frame):

    def __init__(self, master, environment):

        Frame.__init__(self, master)

        self.environment = environment
        self.number_of_devices = self.environment.number_of_devices

        self.cc2541_checked = IntVar()
        self.efm32_checked = IntVar()
        self.bluetooth_checked = IntVar()
        self.ni_usb_checked = IntVar()
Example #30
    def _handle_ASes(self):
        """Spawns several processes (based on the available CPUs) to handle the
        AS resolving and creates the necessary objects based on the results.
        """
        # Gather all the ASNs seen through filter and recursive resolving.
        all_ASNs = list((self.recursed_ASes | self.AS_list) - self.black_list)
        all_ASNs_count = len(all_ASNs)
        if all_ASNs_count < 1:
            return

        # We will devote all but one core to resolving since the main process
        # will handle the objects' creation.
        number_of_resolvers = mp.cpu_count() - 1
        if number_of_resolvers < 1:
            number_of_resolvers = 1

        # The list of ASNs is going to be distributed almost equally to the
        # available resolvers.
        if all_ASNs_count >= number_of_resolvers:
            slice_length = int(math.ceil(all_ASNs_count / float(number_of_resolvers)))
        else:
            number_of_resolvers = all_ASNs_count
            slice_length = 1

        result_q = SimpleQueue()  # NOTE: Only works with this queue.
        processes = []
        slice_start = 0
        for i in xrange(number_of_resolvers):
            ASN_batch = all_ASNs[slice_start:slice_start+slice_length]
            process = mp.Process(target=_subprocess_AS_resolving,
                                 args=(ASN_batch, result_q))
            process.start()
            processes.append(process)
            slice_start += slice_length

        # PROGRESS START
        # Show progress while running.
        # Can be safely commented out until PROGRESS END.
        aps_count = 0
        aps = 0
        time_start = time.time()
        # PROGRESS END

        done = 0
        while done < all_ASNs_count:
            try:
                asn, routes = result_q.get()
            except Empty:
                # This should never be reached with this queue but left here
                # just in case.
                time.sleep(0.2)
                continue

            # If the AS has routes create the appropriate ASN object and add it
            # to the data pool.
            if routes is not None and (routes['ipv4'] or routes['ipv6']):
                ASN_object = rpsl.ASObject(asn)
                for prefix in routes['ipv4']:
                    route_object = rpsl.RouteObject(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route_object)
                for prefix in routes['ipv6']:
                    route6_object = rpsl.Route6Object(prefix, asn)
                    ASN_object.route_obj_dir.append_route_obj(route6_object)
                self.AS_dir.append_ASN_obj(ASN_object)
            done += 1

        # PROGRESS START
        # Show progress while running.
        # Can be safely commented out until PROGRESS END.
            aps_count += 1
            time_diff = time.time() - time_start
            if time_diff >= 1:
                aps = aps_count / time_diff
                aps_count = 0
                time_start = time.time()
            sys.stdout.write("{} of {} ASes | {:.0f} ASes/s          \r"
                             .format(done, all_ASNs_count, aps))
            sys.stdout.flush()
        print