Example #1
    def image_download(self):
        queue_endian = self.queue_endian
        threads_num = self.num_threads
        download_item_queue = ProcQueue()
        save_path = self.save_path
        download_item_dict = self.download_item_dict

        logger.debug("Item Num Producer")
        for goodscode in download_item_dict:
            item_id = str(goodscode)
            input_data = [save_path, item_id]
            download_item_queue.put(input_data)
        for i in range(0, threads_num):
            download_item_queue.put(queue_endian)

        run_threads = list()

        logger.debug("Item Image URL check")
        for i in range(0, threads_num):
            __single_thread = ImageDownloaderWorker(download_item_queue, queue_endian)
            run_threads.append(__single_thread)
            __single_thread.start()

        for __single_thread in run_threads:
            __single_thread.join()

        logger.debug("Close Queues")
        download_item_queue.close()
        download_item_queue.join_thread()
Example #2
class ProcessExecutor(object):

    def __init__(self):
        self.processes = []
        self.q = Queue()

    def wait_until_finished(self):
        while self.processes:
            processes = self.processes
            self.processes = []
            for _process in processes:
                _process.join()
        self.q.close()
        self.q.join_thread()

    def clean(self):
        self.processes = []

    def execute(self, fn, *args, **kwargs):
        promise = Promise()

        self.q.put([promise, fn, args, kwargs], False)
        _process = Process(target=queue_process, args=(self.q,))
        _process.start()
        self.processes.append(_process)
        return promise
Example #3
def test_tiny_circuit():
    t = 2
    n = 5
    c_path = "circuits/tiny_circuit.txt"
    triples = gen_triples(t, n, 2)
    mq = Queue()
    queues = [Queue() for _ in range(n)]
    x = '01'
    y = '10'
    result = '11'
    x_shares = Shamir(t, n).share_bitstring_secret(x)
    y_shares = Shamir(t, n).share_bitstring_secret(y)
    processes = []
    for i in range(n):
        p = Process(target=run_circuit_process,
                    args=(t, n, c_path, i + 1, queues, mq,
                          x_shares[i] + y_shares[i], triples[i]))
        processes.append(p)
    for p in processes:
        p.start()
    vals = []
    for _ in range(n):
        vals.append(mq.get())
    reconstructed = Shamir(t, n).reconstruct_bitstring_secret(vals)
    print("result:", reconstructed)
    assert reconstructed == result, "result incorrect"
    print("TEST PASSED")
    for p in processes:
        p.join()
    for q in queues:
        q.close()
        q.join_thread()
    mq.close()
    mq.join_thread()
Example #4
def main(source):
    """ main method"""
    print("Starting parse of file %s at %s" %(source, time.ctime()))
    loadAuthorFilter()

    start_time = time.time()
    queue = Queue()

    process_list = list()
    
    for _ in range(4):
        process_list.append(Process(target=insertIntoGraph, args=(queue,)))
        #process_list.append(Process(target=createCypherFiles, args=(queue,)))

    for process in process_list:
        process.start()

    source = gzip.open(source, mode='rt', encoding='latin-1')
    parser = make_parser()
    parser.setContentHandler(DBLPContentHandler(queue))
    parser.parse(source)

    # join_thread() requires the queue to be closed first
    queue.close()
    queue.join_thread()

    #TODO: Make a test, with this instructions all processes never end.
    #for process in process_list:
    #    process.join()
    #    logging.info("Consumer terminated")

    elapsed_time = time.time() - start_time
    print("Execution time ", datetime.timedelta(seconds=elapsed_time))
    print("Finish")
Example #5
def main():
    # create thread for each request
    queue = Queue()
    datas = []
    res = []
    threads = []
    for i in range(numReqs):
        if (i == numReqs - 1):
            th = threading.Thread(target=sendRequest, args=(None, queue))
        elif ((i + 1) % 2 == 0):
            data = json.dumps({'action': 'jump', 'time': i + 1})
            datas.append(data)
            th = threading.Thread(target=sendRequest, args=(data, queue))
        elif ((i + 1) % 5 == 0):
            data = json.dumps({'action': 'swim', 'time': i + 1})
            datas.append(data)
            th = threading.Thread(target=sendRequest, args=(data, queue))
        else:
            th = threading.Thread(target=sendRequest, args=(None, queue))
        th.start()
        threads.append(th)
        res.append(queue.get())
    # need queue delay when numReqs is small
    if (numReqs < 10): time.sleep(0.1)
    queue.close()
    queue.join_thread()
    for t in threads:
        t.join()
    # evaluate res
    if (evalAvg(datas, res[len(res) - 1])):
        print 'Success'
    else:
        print 'Failed'
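# Note: sendRequest is not shown in this snippet. A minimal sketch of a compatible worker,
# assuming a hypothetical local HTTP endpoint (the real URL and transport are not part of the
# original), which posts the JSON payload (or polls with GET) and pushes the response body onto
# the shared queue for main() to collect:
import requests  # assumed dependency

SERVER_URL = "http://localhost:8080/action"  # hypothetical endpoint

def sendRequest(data, queue):
    if data is None:
        resp = requests.get(SERVER_URL)
    else:
        resp = requests.post(SERVER_URL, data=data,
                             headers={"Content-Type": "application/json"})
    queue.put(resp.text)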
Example #6
def test_mpc(t, n, c_path, n_triples, inputs, result, reflect=False):
    triples = gen_triples(t, n, n_triples)
    mq = Queue()
    queues = [Queue() for _ in range(n)]
    share_inputs = [Shamir(t, n).share_bitstring_secret(i) for i in inputs]
    processes = []
    for i in range(n):
        inputs = []
        for si in share_inputs:
            inputs.extend(si[i])
        p = Process(target=run_circuit_process, args=(t, n, c_path, i+1, queues, mq, inputs, triples[i]))
        processes.append(p)
    start = time.time()
    for p in processes:
        p.start()
    t1 = Thread(target=consumer, args=(mq, n, result, t, processes, reflect))
    t1.start()
    for p in processes:
        if p.is_alive():
            p.join()
    t1.join(n)
    print(f"time: {round(time.time()-start, 4)} seconds")
    for q in queues:
        q.close()
        q.join_thread()
    mq.close()
    mq.join_thread()
Example #7
def email_campaign_manager_process(
    *, campaign: Campaign, queue: mp.Queue, quit_event: mp.Event
):
    """
    Main function of the process responsible for scheduling the sending of campaigns

    :param campaign: the campaign for which messages must be sent
    :type campaign: :class:`nuntius.models.Campaign`

    :param queue: the work queue on which email messages are put
    :type queue: :class:`multiprocessing.Queue`

    :param quit_event: an event that may be used by the main process to tell the manager it needs to quit
    :type quit_event: :class:`multiprocessing.Event`
    """
    queryset = campaign.get_subscribers_queryset()
    # eliminate people who already received the message
    queryset = queryset.annotate(
        already_sent=Exists(
            CampaignSentEvent.objects.filter(
                subscriber_id=OuterRef("pk"), campaign_id=campaign.id
            ).exclude(result=CampaignSentStatusType.PENDING)
        )
    ).filter(already_sent=False)

    campaign_finished = False

    for subscriber in queryset.iterator():
        if quit_event.is_set():
            break

        if subscriber.get_subscriber_status() != AbstractSubscriber.STATUS_SUBSCRIBED:
            continue

        sent_event = campaign.get_event_for_subscriber(subscriber)

        # just in case there is another nuntius_worker started, but this should not happen
        if sent_event.result != CampaignSentStatusType.PENDING:
            continue

        message = message_for_event(sent_event)

        try:
            put_in_queue_or_quit(
                queue,
                (message, sent_event.id),
                event=quit_event,
                polling_period=app_settings.POLLING_INTERVAL,
            )
        except GracefulExit:
            break
    else:
        campaign_finished = True

    queue.close()
    queue.join_thread()
    # everything has been scheduled for sending:
    if campaign_finished:
        campaign.status = Campaign.STATUS_SENT
        campaign.save()
Example #8
def find_img_pos_multi(screen, img):
    s = time.time()
    H, W = screen.shape[0:2]
    h, w = img.shape[0:2]
    min_diff = 10000
    pos = np.array([0, 0])
    all_pixel_num = (H - h + 1) * (W - w + 1)

    Ws = [0, 1 * W // 4, 2 * W // 4, 3 * W // 4, W]
    result = Queue()
    procs = []
    for i in range(4):
        proc = Process(target=TargetFunction.find_img_pos_multi_target,
                       args=(screen, img, result, Ws[i], Ws[i + 1]))
        procs.append(proc)
        proc.start()

    results = []
    for i in range(4):
        results.append(result.get())

    pos, min_diff = min(results, key=lambda x: x[1])

    result.close()
    result.join_thread()

    for proc in procs:
        proc.join()
    # print(time.time() - s)
    return pos, min_diff
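# Note: TargetFunction.find_img_pos_multi_target is not shown above. A rough sketch of such a
# worker, assuming a brute-force absolute-difference template match over the column range
# [ws, we) (the actual matching metric used by the original is an assumption):
import numpy as np

class TargetFunction:
    @staticmethod
    def find_img_pos_multi_target(screen, img, result, ws, we):
        # scan every candidate top-left corner whose column lies in [ws, we)
        H, W = screen.shape[0:2]
        h, w = img.shape[0:2]
        best_pos, best_diff = np.array([0, 0]), float("inf")
        for y in range(H - h + 1):
            for x in range(ws, min(we, W - w + 1)):
                diff = np.abs(screen[y:y + h, x:x + w].astype(int) - img.astype(int)).mean()
                if diff < best_diff:
                    best_diff, best_pos = diff, np.array([y, x])
        # report this strip's best (position, difference) pair back to the parent
        result.put((best_pos, best_diff))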
Example #9
 def sentence_checker(self, sentence, context_sentences, q):
     print 'Process webExtracter.sentence_checker running...'
     jobs = []
     scrapped_data = {}
     query = sentence
     for j in search(query, tld="co.in", num=5, stop=1, pause=2):
         # use a separate queue per scrapper so the caller's queue `q` is not clobbered
         scrape_q = Queue()
         z = webExtracter()
         p = Process(target=z.scrapper, args=(j, scrape_q))
         jobs.append(p)
         trusted_sources = [r'quora']
         for source in trusted_sources:
             if re.search(source, j):
                 start = time.clock()
                 p.start()
                 p.join()
                 print 'time taken for this process is ', time.clock() - start
                 scrapped_data[j] = scrape_q.get()
                 break
         else:
             p.start()
             p.join(2)
             scrapped_data[j] = scrape_q.get()
             #print 7
             if p.is_alive():
                 print "this shit is still running... let's kill it..."
                 p.terminate()
                 p.join()
                 scrapped_data[j] = 'KILLED'
     extracter = webExtracter()
     match_q = extracter.Data_Processing(scrapped_data, query, context_sentences)
     q.put(match_q)
     q.close()
     q.join_thread()
Example #10
class _workerQpushTimer():
    def __init__(self):
        self.syncPeriod = 2
        self.timer = None
        self.Qinit()

    def Qinit(self):
        self.syncTmpQ = Queue()

    # flush remaining items in the queue, then close and join_thread
    def Qflush(self):
        while True:
            try:
                self.syncTmpQ.get(True, comm.FLUSH_TIMEOUT)
            except Empty:
                break
        self.syncTmpQ.close()
        self.syncTmpQ.join_thread()

    def enableTimer(self, workerPool):
        self.timer = Timer(self.syncPeriod, self.pushToWorkerQ, [workerPool])
        self.timer.start()

    def disableTimer(self):
        if self.timer is not None:
            self.timer.cancel()

    # executed periodically to sync items from the main process queue to the worker queues
    def pushToWorkerQ(self, workerPool):
        while not comm.done.value:
            try:
                item = self.syncTmpQ.get_nowait()
                for w in workerPool:
                    w.queue.put_nowait(item)
            except Empty:
                break
        if not comm.done.value:
            self.enableTimer(workerPool)
Example #11
class Eater:
    """ Worker which does work in another process so that the current process can continue.  Calls to apply can not
    return anything and return immediately.
    """
    def __init__(self, worker, n_jobs=-1, max_size=-1):
        self.worker = worker
        if n_jobs == -1:
            n_jobs = cpu_count()
        self.n_jobs = n_jobs
        self.max_size = max_size

        self.writeto = Queue(self.max_size)
        self.processes = [
            Process(target=TargetWrapper(self.worker,
                                         self.writeto,
                                         returns=False),
                    name='EaterPool {0}'.format(i)) for i in range(self.n_jobs)
        ]
        for process in self.processes:
            process.start()

    def eat(self, item):
        self.writeto.put((True, item))

    def close(self):
        for _ in self.processes:
            self.writeto.put((False, ))
        self.writeto.close()
        self.writeto.join_thread()
Example #12
def test_mpc(t, n, c_path, n_triples, all_inputs, result, reflect=False):
    triples = gen_triples(t, n, n_triples)
    mq = Queue()
    queues = [Queue() for _ in range(n)]
    processes = []
    itypes = []
    for i in all_inputs[0]:
        if i in [0, 1]:
            itypes.append('V')
        else:
            itypes.append('S')
    c = Circuit(c_path, itypes)
    for i in range(n):
        p = Process(target=run_circuit_process,
                    args=(t, n, c, i + 1, queues, mq, all_inputs[i],
                          triples[i]))
        processes.append(p)
    start = time.time()
    for p in processes:
        p.start()
    t1 = Process(target=consumer, args=(mq, n, result, t, processes, reflect))
    t1.start()
    for p in processes:
        p.join()
    t1.join()
    print(f"time: {round(time.time()-start, 4)} seconds")
    while not mq.empty():
        mq.get()
    mq.close()
    for q in queues:
        q.close()
        q.join_thread()
    mq.join_thread()
Example #13
def main():
    my_list = list(range(20000000))

    q = Queue()

    p1 = Process(target=do_sum, args=(q, my_list[:5000000]))
    p2 = Process(target=do_sum, args=(q, my_list[5000000:10000000]))
    p3 = Process(target=do_sum, args=(q, my_list[10000000:15000000]))
    p4 = Process(target=do_sum, args=(q, my_list[15000000:]))
    p1.start()
    p2.start()
    p3.start()
    p4.start()

    r1 = q.get()
    r2 = q.get()
    r3 = q.get()
    r4 = q.get()
    q.close()
    q.join_thread()

    p1.join()
    p2.join()
    p3.join()
    p4.join()

    print(r1 + r2 + r3 + r4)  # pylint: disable=print-statement
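# Note: do_sum is not defined in this snippet. A minimal sketch of a compatible worker,
# assuming it simply sums its slice of the list and reports the result on the queue
# (one q.put() per worker, matching the four q.get() calls above):
def do_sum(q, nums):
    q.put(sum(nums))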
Example #14
def main(connection_string):
    overall_start_time = time.time()
    session = setup_connection(connection_string, create_db=True)

    for entry in FILELIST:
        global CURRENT_FILENAME
        CURRENT_FILENAME = entry
        f_name = f"./databases/{entry}"
        if os.path.exists(f_name):
            logger.info(f"parsing database file: {f_name}")
            start_time = time.time()
            blocks = read_blocks(f_name)
            logger.info(
                f"database parsing finished: {round(time.time() - start_time, 2)} seconds"
            )

            logger.info('parsing blocks')
            start_time = time.time()

            jobs = Queue()

            workers = []
            # start workers
            logger.debug(f"starting {NUM_WORKERS} processes")
            for w in range(NUM_WORKERS):
                p = Process(target=parse_blocks,
                            args=(
                                jobs,
                                connection_string,
                            ),
                            daemon=True)
                p.start()
                workers.append(p)

            # add tasks
            for b in blocks:
                jobs.put(b)
            for i in range(NUM_WORKERS):
                jobs.put(None)
            jobs.close()
            jobs.join_thread()

            # wait to finish
            for p in workers:
                p.join()

            logger.info(
                f"block parsing finished: {round(time.time() - start_time, 2)} seconds"
            )
        else:
            logger.info(
                f"File {f_name} not found. Please download using download_dumps.sh"
            )

    CURRENT_FILENAME = "empty"
    logger.info(
        f"script finished: {round(time.time() - overall_start_time, 2)} seconds"
    )
Example #15
    def start(self):
        # print(tf.config.experimental.list_physical_devices(device_type=None))
        # print(tf.config.experimental.list_logical_devices(device_type=None))

        self.episode_num = 200
        self.ps_num = 1
        self.worker_num = 3
        self.current_episode = 1
        global_remain_episode = Value('i', self.episode_num)
        global_alive_workers = Value('i', self.worker_num)
        global_res_queue = Queue()
        global_grad_queue = Queue()
        global_var_queues = [Queue(1) for i in range(self.worker_num)]
        cluster_config = self.make_cluster_config(self.ps_num, self.worker_num)

        pss = []
        workers = []
        episode_results = []

        cluster_spec = tf.train.ClusterSpec(cluster_config)
        
        for ps_id in range(self.ps_num):
            pss.append(Process(target = self.param_server, args=(ps_id, ps_id, global_remain_episode, global_alive_workers, global_grad_queue, global_var_queues)))

        for worker_id in range(self.worker_num):
            workers.append(Process(target = self.worker, args=(worker_id + self.ps_num, worker_id, global_remain_episode, global_alive_workers, global_grad_queue, global_var_queues[worker_id], global_res_queue)))

        for num in range(self.ps_num):
            pss[num].start()

        for num in range(self.worker_num):
            workers[num].start()

        while ((not global_res_queue.empty()) or (global_alive_workers.value > 0)):
            if not global_res_queue.empty():
                episode_results.append(global_res_queue.get())
                episode_res = episode_results.pop(0)
                print(f"Episode {self.current_episode} Reward with worker {episode_res['worker_id']}: {episode_res['reward']}\t| Loss: {episode_res['loss']}")
                self.current_episode += 1
            
        global_grad_queue.close()
        global_grad_queue.join_thread()

        global_res_queue.close()
        global_res_queue.join_thread()

        for queue in global_var_queues:
            queue.close()
            queue.join_thread()

        for num in range(self.worker_num):
            workers[num].join()
            print(f'Worker {num} join')

        for num in range(self.ps_num):
            pss[num].join()
            print(f'PS {num} join')
Example #16
    def run_workers(self):
        '''Runs workers by assigning them a product index relative to the total number of products in the search query (starting from 0). 
        Checks for any resume point and continues from there. 
        Each Worker is given a product index to download, and is executed in a separate Process.
        Free workers are queued in a multiprocessing.Queue.
        '''
        # create locks
        request_lock = Lock()
        log_download_progress_lock = Lock()
        # loading resume points
        self.query = query_formatter.adjust_for_specific_product(self.query)
        ready_worker_queue = Queue(maxsize=len(self.worker_list))
        resume_point = self.load_resume_point()
        if resume_point is None:
            resume_point = 0
        else:
            resume_point = int(resume_point)

        for worker in self.worker_list:
            worker.register_settings(self.query, self.download_location,
                                     self.polling_interval,
                                     self.offline_retries)
            try:
                worker_resume_point = worker.load_resume_point()
                if worker_resume_point is None:
                    ready_worker_queue.put_nowait(worker)
                else:
                    p = Process(target=worker.run_in_seperate_process,
                                args=(worker_resume_point, ready_worker_queue,
                                      request_lock,
                                      log_download_progress_lock))
                    p.start()
            except Exception as e:
                self.logger.error(e)
                self.logger.error(Fore.RED + "Error in queueing workers")

        # assigning workers a result number to download
        for i in range(resume_point, int(self.total_results)):
            worker = ready_worker_queue.get()
            if worker.return_msg != None:
                self.logger.info(worker.return_msg)
            p = Process(target=worker.run_in_seperate_process,
                        args=(i, ready_worker_queue, request_lock,
                              log_download_progress_lock))
            p.start()
            resume_point += 1
            self.update_resume_point(resume_point)

        # clearing the last batch
        for i in range(0, len(self.worker_list)):
            worker = ready_worker_queue.get()
            if worker.return_msg:
                self.logger.info(worker.return_msg)
        ready_worker_queue.close()
        ready_worker_queue.join_thread()
        self._close_all_loggers()
        self.logger.info("Exiting...")
Example #17
def processor():
    master_list = []
    master_dict = {}

    start = time.time()

    process_list = []

    p_num = int(cpu_count() / 2)

    with Pool(processes=p_num) as pool:
        x = int(8 / p_num)
        definer = lambda test_list, x: [
            test_list[i:i + x] for i in range(0, len(test_list), x)
        ]
        lists_per_core = definer(list(range(8)), x)

        pools = Queue()
        sub_master_list = Queue()

        pools.put(lists_per_core)
        sub_master_list.put({'placeholder': 0})  #IMPORTANT !!!!!!!!!!!

        for i in range(p_num):
            process_list.append(
                Process(target=assigner, args=(
                    i,
                    pools,
                    sub_master_list,
                )))
            process_list[i].start()

        pools.close()
        pools.join_thread()

        for p in process_list:
            p.join()

        # drain the result queue into a list; a multiprocessing.Queue is not iterable,
        # so list(sub_master_list) would fail
        master_list = []
        while not sub_master_list.empty():
            master_list.append(sub_master_list.get())

        sub_master_list.close()
        sub_master_list.join_thread()

    print('Done')

    print(f'Time elapsed since launch {time.time()-start}')

    for i in range(8):
        master_dict = merger(master_dict, master_list[i], i)

    print('Done')

    master_dict = aver_maker(master_dict)

    print(master_dict)

    print(f'Time elapsed since launch {time.time()-start}')
Example #18
def main():
    q = Queue(10)
    p1 = Process(target=func_put, args=(q, ))
    p1.start()
    p2 = Process(target=func_get, args=(q, ))
    p2.start()

    p1.join()
    q.close()
    p2.join()
    q.join_thread()
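# Note: func_put and func_get are not shown above. A minimal sketch of a compatible
# producer/consumer pair, assuming a sentinel-terminated stream (names and item format
# are hypothetical):
def func_put(q):
    # producer: push a batch of items, then a sentinel so the consumer knows to stop
    for i in range(10):
        q.put(i)
    q.put(None)

def func_get(q):
    # consumer: read until the sentinel arrives
    while True:
        item = q.get()
        if item is None:
            break
        print(item)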
Example #19
 def test_transaction_large(self) -> None:
     queue = Queue()  # type: Queue[str]
     msg = 't' * 100001  # longer than the max read size of 100_000
     p = Process(target=server, args=(msg, queue), daemon=True)
     p.start()
     connection_name = queue.get()
     with IPCClient(connection_name, timeout=1) as client:
         assert client.read() == msg.encode()
         client.write(b'test')
     queue.close()
     queue.join_thread()
     p.join()
Example #20
 def test_transaction_large(self) -> None:
     queue = Queue()  # type: Queue[str]
     msg = 't' * 100001  # longer than the max read size of 100_000
     p = Process(target=server, args=(msg, queue), daemon=True)
     p.start()
     connection_name = queue.get()
     with IPCClient(connection_name, timeout=1) as client:
         assert client.read() == msg.encode()
         client.write(b'test')
     queue.close()
     queue.join_thread()
     p.join()
Example #21
class BaseWorker:

    name = 'base'

    def __init__(self):
        self._command_queue = Queue()
        self._result_queue = Queue()
        self._stop_requested = Event()
        self._command_queue_lock = Lock()
        self._process = Process(target=_run, args=(self._command_queue,
                                                   self._result_queue,
                                                   self._stop_requested))

    def __enter__(self):
        self.run()
        return self

    def __exit__(self, *args):
        self.stop()

    def run(self):
        logger.info('starting %s worker', self.name)
        self._process.start()

    def stop(self):
        logger.info('stopping %s worker', self.name)
        # unblock the command_queue in the worker
        self._stop_requested.set()

        # close both queues
        self._command_queue.close()
        self._command_queue.join_thread()
        self._result_queue.close()
        self._result_queue.join_thread()

        # wait for the worker process to stop
        if self._process.is_alive():
            self._process.join()

        logger.info('%s worker stopped', self.name)

    def send_cmd_and_wait(self, cmd, *args, **kwargs):
        if not self._process.is_alive():
            logger.info('%s process is dead quitting', self.name)
            # kill the main thread
            os.kill(os.getpid(), signal.SIGTERM)
            # shutdown the current thread execution so that executor.shutdown does not block
            sys.exit(1)

        with self._command_queue_lock:
            self._command_queue.put((cmd, args, kwargs))
            return self._result_queue.get()
Example #22
def main():
    # build_proxy()
    queue = Queue(2048)
    for i in range(128):
        Process(target=retrieve_from_queue, args=(queue, )).start()

    with open('samples.log') as f:
        process(f, queue)

    queue.close()
    queue.join_thread()
    # pool.close()
    pool.join()
Example #23
class Layer2Worker():
    def __init__(self):
        self.queue = Queue()

    # close and join_thread queue
    def Qflush(self):
        while True:
            time.sleep(comm.FLUSH_TIMEOUT)
            if self.queue.qsize() == 0:
                break
        self.queue.close()
        self.queue.join_thread()

    def saveProcess(self, process):
        self.process = process
Example #24
 def start(self, port, msgs: multiprocessing.Queue, inbound_msgs, key):
     self.msgs = msgs
     self.key = key
     #f = io.StringIO()
     #with redirect_stdout(f):
     #web.run_app(self.app, host='127.0.0.1', port=port,
     #                handle_signals=True)
     loop = asyncio.get_event_loop()
     loop.run_until_complete(self._run_server(port, inbound_msgs))
     msgs.close()
     inbound_msgs.close()
     msgs.join_thread()
     inbound_msgs.join_thread()
Example #25
def main():
    # build_proxy()
    queue = Queue(2048)
    for i in range(128):
        Process(target=retrieve_from_queue, args=(queue,)).start()

    with open('samples.log') as f:
        process(f, queue)

    queue.close()
    queue.join_thread()
    # pool.close()
    pool.join()
Example #26
    def get_item_url_check(self):
        logger.debug("URL Item Exist Check")
        item_dict = self.item_dict
        item_dict_key = item_dict.keys()

        queue_endian = self.queue_endian
        threads_num = self.num_threads
        good_item_queue = ProcQueue()
        good_item_result_queue = ProcQueue()

        logger.debug("Item Num Producer")
        for goodscode in item_dict_key:
            item_id = str(goodscode)
            good_item_queue.put(item_id)
        for i in range(0, threads_num):
            good_item_queue.put(queue_endian)

        run_threads = list()

        logger.debug("Item Image URL check")
        for i in range(0, threads_num):
            __single_thread = ImageCheckWorker(good_item_queue,
                                               good_item_result_queue,
                                               queue_endian)
            run_threads.append(__single_thread)
            __single_thread.start()

        for __single_thread in run_threads:
            __single_thread.join()

        # good_item_result_queue.put(queue_endian)

        logger.debug("Get Null Item List")
        null_image_list = ImageResultSum(good_item_result_queue, queue_endian,
                                         threads_num)

        logger.debug("Remove Null Item List")
        for null_item in null_image_list:
            del item_dict[null_item]

        logger.debug("Close Queues")
        good_item_queue.close()
        good_item_queue.join_thread()

        good_item_result_queue.close()
        good_item_result_queue.join_thread()

        self.item_dict = item_dict
        print(str(self.item_dict))

        return self.item_dict
Example #27
def test_queue():
	q=Queue()
	#procLst=[Process(target=p, args=(q,) ) for p in [prod1, prod2, consum1, consum2]]
	pLst=[Process(target=p, args=(q,) ) for p in [prod1, prod2]]
	cLst=[Process(target=p, args=(q,) ) for p in [consum1, consum2]]
	procLst=pLst+cLst
	for pp in procLst:
		pp.start()
#	for pp in pLst:
#		pp.join()
#	q.put('STOP')
	q.close()
#	print 'Queue is closed'
	q.join_thread()
Example #28
def video_capture():
    """
    Video capture.
    """
    # Default video capture
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        logging.error("Cannot open camera")
        exit()

    input_q = Queue()
    output_q = Queue()
    process = Process(target=object_detection, args=(input_q, output_q))
    process.start()
    processed_frame = None

    cv2.namedWindow("main")
    cv2.namedWindow("object")

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # If frame is read correctly ret is True
        if not ret:
            logging.error("Can't receive frame. Exiting ...")
            break

        if input_q.empty():
            input_q.put(frame)

        concat_frame = frame
        if not output_q.empty():
            processed_frame = output_q.get()
            cv2.imshow("object", processed_frame)

        cv2.imshow("main", frame)

        if cv2.waitKey(1) == ord("q"):
            input_q.close()
            output_q.close()
            input_q.join_thread()
            output_q.join_thread()
            process.terminate()
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #29
class Director(object):
    def __init__(self, producer, consumer):
        self.producer = producer
        self.consumer = consumer
        self.queue = Queue()
        self.prod_proc = Process(target = self.produce)
        self.prod_proc.daemon = True
        self.lock = Lock()
        self.done = Value('b')
        self.done.value = 0

    def start(self):
        self.prod_proc.start()

    def step(self):
        self.lock.acquire()
        done = (self.done.value != 0)
        self.lock.release()
        if done:
            raise Done
        try:
            data = self.queue.get(block = True, timeout = 1.0)
            self.consumer.consume(data)
        except Empty:
            pass

    def stop(self):
        self.prod_proc.join()

    def run(self):
        self.start()
        while True:
            try:
                self.step()
            except Done:
                break
        self.stop()
        
    def produce(self):
        try:
            while True:
                data = self.producer.produce()
                self.queue.put(data)
        except:
            self.lock.acquire()
            self.done.value = 1
            self.lock.release()
            self.queue.close()
            self.queue.join_thread()
Example #30
class Director(object):
    def __init__(self, producer, consumer):
        self.producer = producer
        self.consumer = consumer
        self.queue = Queue()
        self.prod_proc = Process(target=self.produce)
        self.prod_proc.daemon = True
        self.lock = Lock()
        self.done = Value('b')
        self.done.value = 0

    def start(self):
        self.prod_proc.start()

    def step(self):
        self.lock.acquire()
        done = (self.done.value != 0)
        self.lock.release()
        if done:
            raise Done
        try:
            data = self.queue.get(block=True, timeout=1.0)
            self.consumer.consume(data)
        except Empty:
            pass

    def stop(self):
        self.prod_proc.join()

    def run(self):
        self.start()
        while True:
            try:
                self.step()
            except Done:
                break
        self.stop()

    def produce(self):
        try:
            while True:
                data = self.producer.produce()
                self.queue.put(data)
        except:
            self.lock.acquire()
            self.done.value = 1
            self.lock.release()
            self.queue.close()
            self.queue.join_thread()
Example #31
def _worker(worker_class: Type[Worker],
            input_queue: Queue,
            output_queue: Queue,
            num_active_workers: Value,
            worker_id: int,
            kwargs=None) -> None:
    """
    A worker that pulls data points off the input queue and places the execution result on the output queue.
    When there are no data points left on the input queue, it decrements
    num_active_workers to signal completion.
    """

    if kwargs is None:
        kwargs = {}

    logger.info(f"Reader worker: {worker_id} PID: {os.getpid()}")
    try:
        worker = worker_class.start(**kwargs)

        # Keep going until you get an item that's None.
        def input_queue_iterable() -> Iterable[Any]:
            while True:
                item = input_queue.get()
                if item == QueueSignals.stop:
                    break
                yield item

        for processed_item in worker.process(input_queue_iterable()):
            output_queue.put(processed_item)
    except Exception as e:  # pylint: disable=broad-except
        logger.exception(e)
        output_queue.put(QueueSignals.error)
    finally:
        # It's important that we close and join the queue here before
        # decrementing num_active_workers. Otherwise our parent may join us
        # before the queue's feeder thread has passed all buffered items to
        # the underlying pipe resulting in a deadlock.
        #
        # See:
        # https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#pipes-and-queues
        # https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#programming-guidelines
        output_queue.close()
        output_queue.join_thread()

        with num_active_workers.get_lock():
            num_active_workers.value -= 1

        logger.info(f"Reader worker {worker_id} finished")
Example #32
def video_capture():
    os.system(camSetupScript)
    frame_width = 640
    frame_height = 480
    stream = cv2.VideoCapture(0)
    stream.set(cv2.CAP_PROP_FRAME_WIDTH, CAPTURE_WIDTH)
    stream.set(cv2.CAP_PROP_FRAME_HEIGHT, CAPTURE_HEIGHT)

    start = time.time()
    timeElapsed = 0
    frameCount = 0

    def worker(q):
        out = cv2.VideoWriter(filename + ".avi",
                              cv2.VideoWriter_fourcc(*"MJPG"), 30.0,
                              (frame_width, frame_height))
        while q.empty():
            continue
        count = 0
        while not q.empty():
            out.write(q.get())
            count += 1
            if count % 100 == 0:
                print("Wrote ", count, " frames.")
        out.release()
        print("Finished writing.")

    queue = Queue()
    p = Process(target=worker, args=(queue, ))
    p.daemon = True
    p.start()

    while timeElapsed < 1000 / 30:  # run for roughly 1000 frames at 30 fps
        ret, frame = stream.read()
        if ret:
            queue.put(frame)
            frameCount += 1
        if frameCount % 100 == 0:
            print("Read ", frameCount, " frames.")
        timeElapsed = time.time() - start
    print('Total frames: ', frameCount)

    p.join()
    queue.close()
    queue.join_thread()

    print("Shutting down.")
    stream.release()
Example #33
def start(test):
    if not test:
        print("Please provide a value for --test option")
        exit(1)

    if test == "with_update_lock":
        target = with_update_lock
    elif test == "with_optimistic_lock":
        target = with_optimistic_lock
    elif test == "with_serializable":
        target = with_serializable
    else:
        print("Bad argument value for test")
        exit(1)

    signal.signal(signal.SIGINT, handle_sigint)
    print(target.__name__)
    q = Queue()

    processes = []
    no_of_processes = 2
    for _ in range(no_of_processes):
        process = Process(target=target, args=(q, ))
        processes.append(process)
        process.start()
        q.put(False)

        time.sleep(1)

    while True:
        if kill:
            for _ in range(no_of_processes):
                q.put(True)
            break

        dead = True
        for p in processes:
            if p.is_alive():
                dead = False

        if dead:
            break

    q.close()
    q.join_thread()
    for process in processes:
        process.join()
Example #34
    def test_run_flush_retire(self, fake_process_data, fake_get_logger,
                              fake_flush_on_term):
        """``Worker`` 'run' flushes any data before retiring"""
        idle_queue = MagicMock()
        work_queue = Queue()
        work_queue.put(worker.SENTINEL)

        w = DerpWorker(work_group='testing',
                       work_queue=work_queue,
                       idle_queue=idle_queue)
        w.run()
        work_queue.close()
        work_queue.join_thread()

        called_fake_flush_on_term = fake_flush_on_term.called

        self.assertTrue(called_fake_flush_on_term)
Example #35
    def test_connect_twice(self) -> None:
        queue = Queue()  # type: Queue[str]
        msg = 'this is a test message'
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'')  # don't let the server hang up yet, we want to connect again.

        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
        assert p.exitcode == 0
Example #36
    def test_connect_twice(self) -> None:
        queue = Queue()  # type: Queue[str]
        msg = 'this is a test message'
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(
                b''
            )  # don't let the server hang up yet, we want to connect again.

        with IPCClient(connection_name, timeout=1) as client:
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
Example #37
    def test_run_processes_data(self, fake_process_data, fake_get_logger):
        """``Worker`` the 'run' method calls 'process_data' with what is pulls form the work_queue"""
        idle_queue = MagicMock()
        work_queue = Queue()
        work_queue.put('some Work')
        work_queue.put(worker.SENTINEL)

        w = DerpWorker(work_group='testing',
                       work_queue=work_queue,
                       idle_queue=idle_queue)
        w.run()
        work_queue.close()
        work_queue.join_thread()

        called_process_data = fake_process_data.called

        self.assertTrue(called_process_data)
Example #38
def main():
    #build_proxy()
    #pool = Pool(processes=512)
    #m = Manager()
    queue = Queue(2048)
    pool = []
    for i in range(128):
        p = Process(target=retrieve_from_queue, args=(queue,))
        p.start()
        pool.append(p)
    exist_file = 0
    socket.setdefaulttimeout(3)
    with open('samples.log') as f:
        for index, line in enumerate(f):
            if index % 1000 == 0:
                print index, line
            try:
                args = line.split()
                if len(args) == 2:
                    count, url = args
                else:
                    count = args[0]
                    url = ''.join(args[1:])
            except Exception as e:
                print 'exception:', str(e), '|', line
                continue

            # print 'main:', count, url
            fname = urlparse(url).path.split('/')[-1]
            path = './imgs/'+str(index)+'.'+count+'.'+fname

            '''
            result = pool.apply_async(
                    retrieve,
                    args=(url, path, queue),
                    callback=callback
            )
            '''
            queue.put((url, path))
        print 'apply async done'
    queue.close()
    queue.join_thread()
    # pool.close()
    for p in pool:
        p.join()
    for e in exceptions:
        print e
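# Note: retrieve_from_queue is not shown above. A rough sketch of a compatible consumer,
# assuming it downloads each (url, path) pair pulled from the queue (requests is an assumed
# dependency; the original never sends a sentinel, so this worker loops until terminated):
import requests

def retrieve_from_queue(queue):
    while True:
        url, path = queue.get()
        try:
            resp = requests.get(url, timeout=3)
            with open(path, 'wb') as out:
                out.write(resp.content)
        except Exception as e:
            print('download failed: %s (%s)' % (url, e))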
Example #39
class ProcessExecutor(object):
    def __init__(self):
        self.processes = []
        self.q = Queue()

    def wait_until_finished(self):
        for _process in self.processes:
            _process.join()
        self.q.close()
        self.q.join_thread()

    def execute(self, fn, *args, **kwargs):
        promise = Promise()

        self.q.put([promise, fn, args, kwargs], False)
        _process = Process(target=queue_process, args=(self.q,))
        _process.start()
        self.processes.append(_process)
        return promise
Example #40
def split(data, processes):
    '''
    Fork off a number of processes. The data processing will be split
    among those processes.

    The data goes into a queue. Processes pick data off the queue
    until the queue is exhausted.

    processes is the number of processes to start.

    '''
    global _pid, _qout, _processes
    _processes = processes

    def forker():
        for i in range(processes):
            process = os.fork()
            if process != 0:
                return i
        if os.fork() == 0:
            return FeederThread
        else:
            return ConsumerThread

    qin = Queue(maxsize=1000)

    _pid = forker()

    # Feeder process
    if _pid == FeederThread:
        for d in data:
            qin.put(d, block=True)
        for d in range(processes):
            qin.put(EndOfQueue, block=True)
        qin.close()
        qin.join_thread()
        return []
    elif _pid == ConsumerThread:
        return []
    else:
        return iter(lambda: qin.get(block=True), EndOfQueue)
Example #41
class ProcessHandler(object):
	def __init__(self, functions):
		self.processes = []
		self.num_of_processes = len(functions)
		self.functions = functions
		self.event_q = Queue()

	def start(self):
		# create/start processes and event queue
		for i in range(self.num_of_processes):
			function = self.functions[i]
			name = self.functions[i].__name__
			print function
			self.processes.append(Process(target=function, name=name, args=(self.event_q,)))
			
			self.processes[i].start()
			while not self.processes[i].is_alive():
				time.sleep(0.01)
			print self.processes[i]

	def close(self):
		for i in range(self.num_of_processes):
			self.event_q.close()
			self.event_q.join_thread()
			self.processes[i].join()
			print self.processes[i]

	def watchDog(self):
		for i in range(self.num_of_processes):
			if not self.processes[i].is_alive():
				print self.processes[i]
				function = self.functions[i]
				name = self.functions[i].__name__
				q = self.event_q
				self.processes[i] = Process(target=function, name=name, args=(q,))
				self.processes[i].start()
				while not self.processes[i].is_alive():
					time.sleep(0.1)
				print self.processes[i]
Example #42
class Worker:
    """This class is used for poller and reactionner to work.
    The worker is a process launch by theses process and read Message in a Queue
    (self.s) (slave)
    They launch the Check and then send the result in the Queue self.m (master)
    they can die if they do not do anything (param timeout)

    """

    id = 0  # None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _c = None

    def __init__(self, id, s, returns_queue, processes_by_worker, mortal=True, timeout=300, max_plugins_output_length=8192, target=None, loaded_into='unknown', http_daemon=None):
        self.id = self.__class__.id
        self.__class__.id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.s = None
        self.processes_by_worker = processes_by_worker
        self._c = Queue()  # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target = self.work
        self._process = Process(target=target, args=(s, returns_queue, self._c))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False
        # Keep track of where the worker was launched from (poller or reactionner?)
        self.loaded_into = loaded_into
        if os.name != 'nt':
            self.http_daemon = http_daemon
        else:  # the Windows forker does not like pickling http/lock
            self.http_daemon = None
        

    def is_mortal(self):
        return self._mortal


    def start(self):
        self._process.start()


    # Kill the background process
    # AND correctly close the queues (input and output);
    # each queue has a feeder thread, so close that too....
    def terminate(self):
        # We can just terminate process, not threads
        if not is_android:
            self._process.terminate()
        # If we are using a Manager(), these functions
        # do not exist, hence the hasattr checks
        if hasattr(self._c, 'close'):
            self._c.close()
            self._c.join_thread()
        if hasattr(self.s, 'close'):
            self.s.close()
            self.s.join_thread()

    def join(self, timeout=None):
        self._process.join(timeout)

    def is_alive(self):
        return self._process.is_alive()

    def is_killable(self):
        return self._mortal and self._idletime > self._timeout

    def add_idletime(self, time):
        self._idletime = self._idletime + time

    def reset_idle(self):
        self._idletime = 0

    def send_message(self, msg):
        self._c.put(msg)

    # A zombie is immortal, so it cannot be killed anymore
    def set_zombie(self):
        self._mortal = False

    # Get new checks if less than nb_checks_max
    # If no new checks were received and none are queued,
    # sleep for 1 sec
    # REF: doc/shinken-action-queues.png (3)
    def get_new_checks(self):
        try:
            while(len(self.checks) < self.processes_by_worker):
                #print "I", self.id, "wait for a message"
                msg = self.s.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                #print "I", self.id, "I've got a message!"
        except Empty, exp:
            if len(self.checks) == 0:
                self._idletime = self._idletime + 1
                time.sleep(1)
        # Maybe the Queue() is not available, if so, just return
        # get back to work :)
        except IOError, exp:
            return
Example #43
class TestSet(object):
    "Manage a set of test"

    def __init__(self, base_dir):
        """Initalise a test set
        @base_dir: base directory for tests
        """
        # Parse arguments
        self.base_dir = base_dir

        # Init internals
        self.task_done_cb = lambda tst, err: None # On task done callback
        self.task_new_cb = lambda tst: None       # On new task callback
        self.todo_queue = Queue()                 # Tasks to do
        self.message_queue = Queue()              # Messages with workers
        self.tests = []                           # Tests to run
        self.tests_done = []                      # Tasks done
        self.cpu_c = cpu_count()                  # CPUs available
        self.errorcode = 0                        # Non-zero if a test failed
        self.additional_args = []                 # Arguments to always add

    def __add__(self, test):
        "Same as TestSet.add"
        self.add(test)
        return self

    def add(self, test):
        "Add a test instance to the current test set"
        if not isinstance(test, Test):
            raise ValueError("%s is not a valid test instance" % (repr(test)))
        self.tests.append(test)

    def set_cpu_numbers(self, cpu_c):
        """Set the number of cpu to use
        @cpu_c: Number of CPU to use (default is maximum)
        """
        self.cpu_c = cpu_c

    def set_callback(self, task_done=None, task_new=None):
        """Set callbacks for task information retrieval
        @task_done: function(Test, Error message)
        @task_new: function(Test)
        """
        if task_done:
            self.task_done_cb = task_done
        if task_new:
            self.task_new_cb = task_new

    def _add_tasks(self):
        "Add tests to do, regarding to dependencies"
        for test in self.tests:
            # Check dependencies
            launchable = True
            for dependency in test.depends:
                if dependency not in self.tests_done:
                    launchable = False
                    break

            if launchable:
                # Add task
                self.tests.remove(test)
                self.todo_queue.put(test)

        if len(self.tests) == 0:
            # Poison pills
            for _ in xrange(self.cpu_c):
                self.todo_queue.put(None)

        # All tasks done
        if len(self.tests_done) == self.init_tests_number:
            self.message_queue.put(MessageClose())

    def _messages_handler(self):
        "Manage message between Master and Workers"

        # Main loop
        while True:
            message = self.message_queue.get()
            if isinstance(message, MessageClose):
                # Poison pill
                break
            elif isinstance(message, MessageTaskNew):
                # A task begins
                self.task_new_cb(message.task)
            elif isinstance(message, MessageTaskDone):
                # A task has been done
                self.tests_done.append(message.task)
                self._add_tasks()
                self.task_done_cb(message.task, message.error)
                if message.error is not None:
                    self.errorcode = -1
            else:
                raise ValueError("Unknown message type %s" % type(message))

    @staticmethod
    def worker(todo_queue, message_queue, init_args):
        """Worker launched in parrallel
        @todo_queue: task to do
        @message_queue: communication with Host
        @init_args: additionnal arguments for command line
        """

        # Main loop
        while True:
            # Acquire a task
            test = todo_queue.get()
            if test is None:
                break
            message_queue.put(MessageTaskNew(test))

            # Go to the expected directory
            current_directory = os.getcwd()
            os.chdir(test.base_dir)

            # Launch test
            testpy = subprocess.Popen(([sys.executable] +
                                       init_args + test.command_line),
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
            outputs = testpy.communicate()

            # Check result
            error = None
            if testpy.returncode != 0:
                error = outputs[1]

            # Restore directory
            os.chdir(current_directory)

            # Report task finish
            message_queue.put(MessageTaskDone(test, error))

    @staticmethod
    def fast_unify(seq, idfun=None):
        """Order preserving unifying list function
        @seq: list to unify
        @idfun: marker function (default is identity)
        """
        if idfun is None:
            idfun = lambda x: x
        seen = {}
        result = []
        for item in seq:
            marker = idfun(item)

            if marker in seen:
                continue
            seen[marker] = 1
            result.append(item)
        return result

    def _clean(self):
        "Remove produced files"

        # Build the list of products
        products = []
        current_directory = os.getcwd()
        for test in self.tests_done:
            for product in test.products:
                # Get the full product path
                products.append(os.path.join(current_directory, test.base_dir,
                                             product))

        # Unify the list and remove products
        for product in TestSet.fast_unify(products):
            try:
                os.remove(product)
            except OSError:
                print "Cleanning error: Unable to remove %s" % product

    def add_additionnal_args(self, args):
        """Add arguments to used on the test command line
        @args: list of str
        """
        self.additional_args += args

    def run(self):
        "Launch tests"

        # Go in the right directory
        self.current_directory = os.getcwd()
        os.chdir(self.base_dir)

        # Launch workers
        processes = []
        for _ in xrange(self.cpu_c):
            p = Process(target=TestSet.worker, args=(self.todo_queue,
                                                     self.message_queue,
                                                     self.additional_args))

            processes.append(p)
            p.start()

        # Add initial tasks
        self.init_tests_number = len(self.tests)
        # Initial tasks
        self._add_tasks()

        # Handle messages
        self._messages_handler()

        # Close queue and join processes
        self.todo_queue.close()
        self.todo_queue.join_thread()
        self.message_queue.close()
        self.message_queue.join_thread()
        for p in processes:
            p.join()

    def end(self, clean=True):
        """End a testset run
        @clean: (optional) if set, remove tests products
        PRE: run()
        """
        # Clean
        if clean:
            self._clean()

        # Restore directory
        os.chdir(self.current_directory)

    def tests_passed(self):
        "Return a non zero value if at least one test failed"
        return self.errorcode

    def filter_tags(self, include_tags=None, exclude_tags=None):
        """Filter tests by tags
        @include_tags: list of tags' name (whitelist)
        @exclude_tags: list of tags' name (blacklist)
        @include_tags and @exclude_tags cannot be used together"""

        if include_tags and exclude_tags:
            raise ValueError("Include and Exclude cannot be used together")

        new_testset_include = []
        new_testset_exclude = list(self.tests)

        # Update include and exclude lists
        for index, test in enumerate(self.tests):
            for tag in test.tags:
                if exclude_tags and tag in exclude_tags:
                    new_testset_exclude.remove(test)
                    break
                if include_tags and tag in include_tags:
                    new_testset_include.append(test)
                    break

        # Update testset list
        if include_tags:
            self.tests = new_testset_include
        elif exclude_tags:
            self.tests = new_testset_exclude
Example #44
# Prepare multiprocess
cpu_c = cpu_count()
queue = Queue()
processes = []

for _ in xrange(cpu_c):
    p = Process(target=do_test, args=(args.filename, queue, machine,
                                      abicls, tests, map_addr,
                                      args.quiet, args.timeout, args.jitter,
                                      args.verbose))
    processes.append(p)
    p.start()

# Add tasks
for address in addresses:
    queue.put(address)

# Add poison pill
for _ in xrange(cpu_c):
    queue.put(None)

# Get results
queue.close()
queue.join_thread()
for p in processes:
    p.join()

if not queue.empty():
    print("An error occured: queue is not empty")
Example #45
class Worker:
    id = 0#None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _c = None
    def __init__(self, id, s, returns_queue, processes_by_worker, mortal=True, timeout=300, max_plugins_output_length=8192, target=None):
        self.id = self.__class__.id
        self.__class__.id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.processes_by_worker = processes_by_worker
        self.input_queue = s
        self._c = Queue() # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target=self.work
        self._process = Process(target=target, args=(s, returns_queue, self._c))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False


    def is_mortal(self):
        return self._mortal


    def start(self):
        self._process.start()


    # Kill the background process
    # AND correctly close the queues (input and output);
    # each queue has a feeder thread, so close that too....
    def terminate(self):
        # We can just terminate process, not threads
        if not is_android:
            self._process.terminate()
        # If we are using a Manager(), these functions
        # do not exist, hence the hasattr checks
        if hasattr(self._c, 'close'):
            self._c.close()
            self._c.join_thread()
        if hasattr(self.input_queue, 'close'):
            self.input_queue.close()
            self.input_queue.join_thread()


    def join(self, timeout=None):
        self._process.join(timeout)


    def is_alive(self):
        return self._process.is_alive()


    def is_killable(self):
        return self._mortal and self._idletime > self._timeout


    def add_idletime(self, time):
        self._idletime = self._idletime + time


    def reset_idle(self):
        self._idletime = 0


    def send_message(self, msg):
        self._c.put(msg)


    # A zombie is immortal, so it cannot be killed anymore
    def set_zombie(self):
        self._mortal = False


    # Get new checks if less than nb_checks_max
    # If no new checks got and no check in queue,
    # sleep for 1 sec
    # REF: doc/shinken-action-queues.png (3)
    def get_new_checks(self):
        try:
            while len(self.checks) < self.processes_by_worker:
                #print "I", self.id, "wait for a message"
                msg = self.s.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                #print "I", self.id, "I've got a message!"
        except Empty, exp:
            if len(self.checks) == 0:
                self._idletime = self._idletime + 1
                time.sleep(1)
        # Maybe the Queue() is not available, if so, just return
        # get back to work :)
        except IOError, exp:
            return
Ejemplo n.º 46
0
        glFlush()
        pygame.display.flip()
        try:
            result = queue_locations.get(False, 0.1)
            locations.extend(result)
            print "MOTHER > There are %d locations saved ... :)" % (len(locations))
            print "MOTHER > last location:\n", pformat(locations[-1])
            # current_location = len(locations)-1
            no_data = 0
        except QEmpty:
            if not e_startup.is_set():
                p_gps_upd.start()
                e_startup.set()
            no_data += r_chrono
            pass

        chrono_tmp = chrono
        chrono = pygame.time.get_ticks() / 1000.0

    print "MOTHER : closing queue locations"
    queue_locations.close()
    print "MOTHER : waiting end of queue"
    queue_locations.join_thread()
    print "MOTHER : set stopped event"
    e_stopped.set()
    print "MOTHER : waiting for GPSUPDATER to end"
    p_gps_upd.join()
    print "MOTHER : quit PyGame"
    pygame.quit()
Ejemplo n.º 47
0
class gpib:
    visaLib = visa.VisaLibrary()
    delay = 0  # command transmit delay
    values_format = pyvisa.highlevel.single | pyvisa.highlevel.big_endian  # this is how a keithley 2400 does binary transfers
    chunk_size = 102400  # need a slightly bigger transfer buffer than default to be able to transfer a full sample buffer (2500 samples) from a keithley 2400 in one shot
    def __init__(self,locationString=None,timeout=30,useQueues=False):
        self.locationString = locationString
        self.timeout = timeout
        self.useQueues = useQueues

        if self.locationString is not None:
            if self.useQueues: #queue mode
                #build the queues
                self.task_queue = Queue()
                self.done_queue = Queue()
                #kickoff the worker process
                self.p = Process(target=self._worker, args=(self.task_queue, self.done_queue))
                self.p.start()
            else:#non-queue mode
                self.v = visa.instrument(self.locationString,timeout=self.timeout,chunk_size=self.chunk_size,delay=self.delay,values_format=self.values_format)

    def __del__(self):
        if self.useQueues:
            if self.p.is_alive():
                self.task_queue.put('STOP')
            self.p.join()
            self.task_queue.close()
            self.done_queue.close()
            self.task_queue.join_thread()
            self.done_queue.join_thread()
        else:
            if hasattr(self,'v'):
                self.v.close()

    def _worker(self, inputQ, outputQ):
        #local, threadsafe instrument object created here
        v = visa.instrument(self.locationString,timeout=self.timeout,chunk_size=self.chunk_size,delay=self.delay,values_format=self.values_format)
        for func, args in iter(inputQ.get, 'STOP'):#queue processing going on here
            try:
                toCall = getattr(v,func)
                ret = toCall(*args)#visa function call occurs here
            except:
                ret = None
            if ret: #don't put None outputs into output queue
                outputQ.put(ret)
        print "queue worker closed properly"
        v.close()
        inputQ.close()
        outputQ.close()

    #make queue'd and non-queued writes look the same to the client
    def write(self,string):
        if self.useQueues:
            self.task_queue.put(('write',(string,)))
        else:
            self.v.write(string)

    #controls remote enable line
    def controlRen(self,mode):
        visa.Gpib()._vpp43.gpib_control_ren(mode)

    def clearInterface(self):
        self.visaLib.gpib_send_ifc()

    def findInstruments(self):
        return visa.get_instruments_list()    
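Purely as an illustration of the two modes handled by the class above (the VISA resource string is only an example), usage might look like:

# Hypothetical usage of the gpib wrapper above
k = gpib('GPIB0::24::INSTR', useQueues=True)    # queued mode: VISA I/O runs in a worker process
k.write('*RST')                                 # returns immediately, the worker does the transfer
k2 = gpib('GPIB0::24::INSTR', useQueues=False)  # non-queued mode: direct, blocking visa calls
k2.write('*RST')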

def my_consumer(q):
    """
    Consumes some data and works on it

    In this case, all it does is double the input
    """
    while True:
        data = q.get()
        print('data found to be processed: {}'.format(data))
        processed = data * 2
        print(processed)

        if data is sentinel:
            break


if __name__ == '__main__':
    q = Queue()
    data = [5, 10, 13, -1]
    process_one = Process(target=creator, args=(data, q))
    process_two = Process(target=my_consumer, args=(q,))
    process_one.start()
    process_two.start()

    q.close()
    q.join_thread()

    process_one.join()
    process_two.join()
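The `creator` producer and the `sentinel` terminator are assumed above but not shown; a minimal sketch, taking the sentinel to be the trailing -1 in the data list, could be:

sentinel = -1  # assumed terminator value, matching the last element of data

def creator(data, q):
    # Put every item (including the trailing sentinel) onto the queue.
    # The consumer's "is" comparison works here only because CPython
    # caches small integers such as -1.
    for item in data:
        q.put(item)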
Ejemplo n.º 49
0
def refine_all(G,  **kwargs):
    realignbubbles=[]
    
    if kwargs['minsize']==None:
        kwargs['minsize']=kwargs['minlength']

    #detect all bubbles
    for b in bubbles.bubbles(G):

        if kwargs['complex']:
            if b.issimple():
                logging.debug("Skipping bubble %s, not complex."%str(b.nodes))
                continue

        if kwargs['simple']:
            if not b.issimple():
                logging.debug("Skipping bubble %s, not simple."%str(b.nodes))
                continue

        if b.minsize<kwargs['minsize']:
            logging.debug("Skipping bubble %s, smallest allele (%dbp) is smaller than minsize=%d."%(str(b.nodes),b.minsize,kwargs['minsize']))
            continue

        if b.maxsize<kwargs['minmaxsize']:
            logging.debug("Skipping bubble %s, largest allele (%dbp) is smaller than minmaxsize=%d."%(str(b.nodes),b.maxsize,kwargs['minmaxsize']))
            continue

        if b.maxsize>kwargs['maxsize']:
            logging.warn("Skipping bubble %s, largest allele (%dbp) is larger than maxsize=%d."%(str(b.nodes),b.maxsize,kwargs['maxsize']))
            continue

        if kwargs['maxcumsize']!=None:
            if b.cumsize>kwargs['maxcumsize']:
                logging.warn("Skipping bubble %s, cumulative size %d is larger than maxcumsize=%d."%(str(b.nodes),b.cumsize,kwargs['maxcumsize']))
                continue

        if b.cumsize<kwargs['mincumsize']:
            logging.info("Skipping bubble %s, cumulative size %d is smaller than mincumsize=%d."%(str(b.nodes),b.cumsize,kwargs['mincumsize']))
            continue

        if len(b.nodes)==3:
            logging.info("Skipping bubble %s, indel, no point in realigning."%(str(b.nodes)))
            continue

        realignbubbles.append(b)

    distinctbubbles=[]
    for b1 in realignbubbles:
        for b2 in realignbubbles:
            if set(b2.nodes).issuperset(set(b1.nodes)) and not set(b1.nodes)==set(b2.nodes):
                logging.debug("Skipping bubble %s, because its nested in %s."%(str(b1.nodes),str(b2.nodes)))
                break
        else:
            distinctbubbles.append(b1)



    logging.info("Realigning a total of %d bubbles"%len(distinctbubbles))
    nn=max([node for node in G.nodes() if type(node)==int])+1

    if kwargs['nproc']>1:

        inputq = Queue()
        outputq = Queue()

        nworkers=kwargs['nproc']
        aworkers=[]

        for i in range(nworkers):
            aworkers.append(Process(target=align_worker, args=(inputq,outputq)))

        for p in aworkers:
            p.start()

        for bubble in distinctbubbles:
            G.node[bubble.source]['aligned']=1
            G.node[bubble.sink]['aligned']=1

            logging.debug("Submitting realign bubble between <%s> and <%s>, max allele size %dbp (in nodes=%d)."%(bubble.source,bubble.sink,bubble.maxsize,len(bubble.nodes)-2))
            bnodes=list(set(bubble.nodes)-set([bubble.source,bubble.sink]))
            sg=G.subgraph(bnodes).copy()
            
            offsets=dict()
            for sid in G.node[bubble.source]['offsets']:
                offsets[sid]=G.node[bubble.source]['offsets'][sid]+len(G.node[bubble.source]['seq'])

            sourcesamples=set(G.node[bubble.source]['offsets'].keys())
            sinksamples=set(G.node[bubble.sink]['offsets'].keys())
            paths=sourcesamples.intersection(sinksamples)

            inputq.put((sg,bubble,offsets,paths,kwargs))

        for i in range(nworkers):
            inputq.put(-1)

        graph_worker(G,nn,outputq,nworkers)
        
        inputq.close()
        inputq.join_thread()

        outputq.close()

        for p in aworkers:
            p.join()

    else:
        for bubble in distinctbubbles:
            G.node[bubble.source]['aligned']=1
            G.node[bubble.sink]['aligned']=1

            bnodes=list(set(bubble.nodes)-set([bubble.source,bubble.sink]))
            sg=G.subgraph(bnodes)
            
            offsets=dict()
            for sid in G.node[bubble.source]['offsets']:
                offsets[sid]=G.node[bubble.source]['offsets'][sid]+len(G.node[bubble.source]['seq'])

            sourcesamples=set(G.node[bubble.source]['offsets'].keys())
            sinksamples=set(G.node[bubble.sink]['offsets'].keys())
            paths=sourcesamples.intersection(sinksamples)

            res=refine_bubble(sg,bubble,offsets,paths, **kwargs)
            if res==None:
                continue
            else:
                bubble,ng,path2start,path2end=res
                G,nn=replace_bubble(G,bubble,ng,path2start,path2end,nn)

    return G
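`align_worker` is used above but not shown; a minimal sketch, assuming it simply wraps `refine_bubble` and stops on the -1 poison pill, might be:

def align_worker(inputq, outputq):
    # Hypothetical worker loop: realign bubbles until the -1 poison pill arrives
    while True:
        task = inputq.get()
        if task == -1:
            break
        sg, bubble, offsets, paths, kwargs = task
        res = refine_bubble(sg, bubble, offsets, paths, **kwargs)
        if res is not None:
            outputq.put(res)
    outputq.put(-1)  # tell graph_worker this worker is done (assumed protocol)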
Ejemplo n.º 50
0
File: multi.py Project: thaisdb/TCC
from multiprocessing import Process, Queue, Lock

from clientAplication import Aplication
from clientTransport import TCPClient, UDPClient
from teste import Work


lock = Lock()
# app = Aplication()
jobs = Queue()


udp = Process(target=UDPClient, args=("HTTPRequest.txt", jobs))
udp.start()

jobs.put(Aplication())
jobs.close()
jobs.join_thread()
udp.join()

"""
app = Process(target=Aplication, args=(jobs,))
app.start()

jobs.put(UDPClient("HTTPRequest.txt"))

#wait first process to finish
jobs.close()
jobs.join_thread()
app.join()
"""
Ejemplo n.º 51
0
class MPlayer(Popen):
   arg_types = {
      'String':(str, '"%s"'),
      'Float':(float, '%f'),
      'Integer':(int, '%d'),
   }

   @property
   def state(self):
      return self._state.value

   def __init__(self, args=None, path=None):
      self.path = path or 'mplayer' 
      self.args = args or []
      self.get_cmds()
      self.run()
   
   def __getattr__(self, attr):
      if attr in self.cmds:
         return lambda *args, **kargs: self.send_cmd(attr, args, kargs)
      raise AttributeError('MPlayer does not respond to: %s' % attr)

   def run(self):
      Popen.__init__(self, [self.path, '-idle', '-slave', '-quiet']+self.args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
      self.manager = Manager()
      self.defaults = self.manager.dict()
      self._state = Value('i')
      self._state.value = STOPPED
      self.notifier = SelectQueue()
      self.calls = SelectQueue()
      self.results = Queue()
      self.ioworker = IOWorker(self.stdin, self.stdout, self._state, self.notifier, self.calls, self.results)
      self.ioworker.start()

   def kill(self):
      if self.notifier:
         self.notifier.close()
         self.notifier.join_thread()

      if self.results:
         self.results.close()
         self.results.join_thread()

      if self.ioworker:
         self.ioworker.terminate()

      if self.poll() is None:
         self.terminate()

   def restart(self):
      self.kill()
      self.run()

   def get_cmds(self):
      self.cmds = {}
      output = Popen([self.path, '-input', 'cmdlist'], stdout=PIPE)
      for line in output.stdout:
         match = re.search(br'^(\w+)\s+(.*)', line)
         cmd, args = [item.decode('utf-8') for item in match.groups()]
         self.cmds[cmd] = args.split(' ') if args else []

   def process_args(self, cmd, args):
      try:
         for i in range(len(args)):
            arg_type = self.cmds[cmd][i].strip('[]')
            arg_func, arg_mask = self.arg_types[arg_type]
            yield arg_mask % arg_func(args[i])
      except (IndexError, ValueError):
         raise ValueError('%s expects arguments of format %r' % (cmd, ' '.join(str(x) for x in self.cmds[cmd])))

   def send_cmd(self, cmd, args, kargs):
      if self.poll(): raise MPlayerNotRunning('MPlayer instance not running.')
      prefix = kargs.get('prefix', self.defaults.get('prefix', ''))
      default = kargs.get('default', self.defaults.get('default'))
      cmd = '%s %s' % (cmd, ' '.join(arg for arg in self.process_args(cmd, args)))
      self.calls.put([prefix, cmd, default])
      return self.results.get()
Ejemplo n.º 52
0
class TestSet(object):
    "Manage a set of test"

    def __init__(self, base_dir):
        """Initalise a test set
        @base_dir: base directory for tests
        """
        # Parse arguments
        self.base_dir = base_dir

        # Init internals
        self.task_done_cb = lambda tst, err: None # On task done callback
        self.task_new_cb = lambda tst: None       # On new task callback
        self.todo_queue = Queue()                 # Tasks to do
        self.message_queue = Queue()              # Messages with workers
        self.tests = []                           # Tests to run
        self.tests_done = []                      # Tasks done
        self.cpu_c = cpu_count()                  # CPUs available
        self.errorcode = 0                        # Non-zero if a test failed
        self.additional_args = []                 # Arguments to always add

    def __add__(self, test):
        "Same as TestSet.add"
        self.add(test)
        return self

    def add(self, test):
        "Add a test instance to the current test set"
        if not isinstance(test, Test):
            raise ValueError("%s is not a valid test instance" % (repr(test)))
        self.tests.append(test)

    def set_cpu_numbers(self, cpu_c):
        """Set the number of cpu to use
        @cpu_c: Number of CPU to use (default is maximum)
        """
        self.cpu_c = cpu_c

    def set_callback(self, task_done=None, task_new=None):
        """Set callbacks for task information retrieval
        @task_done: function(Test, Error message)
        @task_new: function(Test)
        """
        if task_done:
            self.task_done_cb = task_done
        if task_new:
            self.task_new_cb = task_new

    def add_tasks(self):
        "Add tests to do, regarding to dependencies"
        for test in self.tests:
            # Check dependencies
            launchable = True
            for dependency in test.depends:
                if dependency not in self.tests_done:
                    launchable = False
                    break

            if launchable:
                # Add task
                self.tests.remove(test)
                self.todo_queue.put(test)

        if len(self.tests) == 0:
            # Poison pills
            for _ in xrange(self.cpu_c):
                self.todo_queue.put(None)

        # All tasks done
        if len(self.tests_done) == self.init_tests_number:
            self.message_queue.put(MessageClose())

    def messages_handler(self):
        "Manage message between Master and Workers"

        # Main loop
        while True:
            message = self.message_queue.get()
            if isinstance(message, MessageClose):
                # Poison pill
                break
            elif isinstance(message, MessageTaskNew):
                # A task begins
                self.task_new_cb(message.task)
            elif isinstance(message, MessageTaskDone):
                # A task has been done
                self.tests_done.append(message.task)
                self.add_tasks()
                self.task_done_cb(message.task, message.error)
                if message.error is not None:
                    self.errorcode = -1
            else:
                raise ValueError("Unknown message type %s" % type(message))

    @staticmethod
    def worker(todo_queue, message_queue, init_args):
        """Worker launched in parrallel
        @todo_queue: task to do
        @message_queue: communication with Host
        @init_args: additionnal arguments for command line
        """

        # Main loop
        while True:
            # Acquire a task
            test = todo_queue.get()
            if test is None:
                break
            message_queue.put(MessageTaskNew(test))

            # Go to the expected directory
            current_directory = os.getcwd()
            os.chdir(test.base_dir)

            # Launch test
            testpy = subprocess.Popen(["python"] + init_args + test.command_line,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
            outputs = testpy.communicate()

            # Check result
            error = None
            if testpy.returncode != 0:
                error = outputs[1]

            # Restore directory
            os.chdir(current_directory)

            # Report task finish
            message_queue.put(MessageTaskDone(test, error))

    def clean(self):
        "Remove produced files"

        for test in self.tests_done:
            # Go to the expected directory
            current_directory = os.getcwd()
            os.chdir(test.base_dir)

            # Remove files
            for product in test.products:
                try:
                    os.remove(product)
                except OSError:
                    print "Cleanning error: Unable to remove %s" % product

            # Restore directory
            os.chdir(current_directory)

    def add_additionnal_args(self, args):
        """Add arguments to use on the test command line
        @args: list of str
        """
        self.additional_args += args

    def run(self):
        "Launch tests"

        # Go in the right directory
        current_directory = os.getcwd()
        os.chdir(self.base_dir)

        # Launch workers
        processes = []
        for _ in xrange(self.cpu_c):
            p = Process(target=TestSet.worker, args=(self.todo_queue,
                                                     self.message_queue,
                                                     self.additional_args))

            processes.append(p)
            p.start()

        # Add initial tasks
        self.init_tests_number = len(self.tests)
        # Initial tasks
        self.add_tasks()

        # Handle messages
        self.messages_handler()

        # Close queue and join processes
        self.todo_queue.close()
        self.todo_queue.join_thread()
        self.message_queue.close()
        self.message_queue.join_thread()
        for p in processes:
            p.join()

        # Clean
        self.clean()

        # Restore directory
        os.chdir(current_directory)

    def tests_passed(self):
        "Return a non zero value if at least one test failed"
        return self.errorcode
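A minimal driver for this class might look like the sketch below; the way the Test instances are built is an assumption, only the TestSet calls come from the class above:

import sys

# Hypothetical usage of the TestSet above (Test instances built elsewhere)
testset = TestSet("/path/to/tests")
testset.set_cpu_numbers(2)
testset.set_callback(task_done=lambda test, error: None,
                     task_new=lambda test: None)
for test in my_tests:      # my_tests: an iterable of Test instances, assumed
    testset.add(test)
testset.run()
sys.exit(testset.tests_passed())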
Ejemplo n.º 53
0
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--server', default=DEFAULT_SERVER,
                        help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
    parser.add_argument('--port', default=DEFAULT_PORT,
                        help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
    parser.add_argument('--scanfile', help=u'Path to umich scan file you are ingesting. '
                                           u'Please make sure to decompress it')
    parser.add_argument('--initial', help=u'If this is the first file you are importing please use this flag',
                        action='store_true')
    args = parser.parse_args(argv[1:])

    if args.scanfile is None:
        logger.error("Please include a scanfile")
        sys.exit(1)

    workers = cpu_count()
    process_hosts_queue = Queue(maxsize=20000)
    process_certs_queue = Queue(maxsize=20000)

    for w in xrange(workers/2):
        #  Establish elasticsearch connection for each process
        es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
        p = Process(target=process_hosts, args=(process_hosts_queue, es, args.initial))
        p.daemon = True
        p.start()

    for w in xrange(workers/2):
        #  Establish elasticsearch connection for each process
        es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
        p = Process(target=process_scan_certs, args=(process_certs_queue, es))
        p.daemon = True
        p.start()

    logger.warning("Starting processing of {file} at {date}".format(file=args.scanfile, date=datetime.now()))

    # This is the bottleneck of the process but it works for now
    parse_scanfile(args.scanfile, process_hosts_queue, process_certs_queue)

    #  Once all the json lines have been put onto the queue. Add DONE so the queue workers know when to quit.
    for w in xrange(workers):
        process_hosts_queue.put("DONE")
        process_certs_queue.put("DONE")

    #  Close out the queue we are done
    process_hosts_queue.close()
    process_hosts_queue.join_thread()
    process_certs_queue.close()
    process_certs_queue.join_thread()

    #  this is kinda dirty but without looking up everything at insert time (slow) I don't know of a better way to do
    #  this based on the number of documents we will have
    refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
    # construct an elasticsearch query where the filter is looking for any entry that is missing the field first_seen
    q = {'size': 500, "query": {"match_all": {}}, "filter": {"missing": {"field": "first_seen"}}}

    new_updates = refresh_es.search(index='passive-ssl-hosts-umich', body=q)

    logger.warning("Numer of hosts to update is {count}".format(count=new_updates['hits']['total']))

    # Scan across all the documents missing the first_seen field and bulk update them
    missing_first_seen = scan(refresh_es, query=q, scroll='30m', index='passive-ssl-hosts-umich')

    bulk_miss = []
    for miss in missing_first_seen:
        last_seen = miss['_source']['last_seen']
        first_seen = last_seen
        action = {"_op_type": "update", "_index": "passive-ssl-hosts-umich", "_type": "host", "_id": miss['_id'],
                  "doc": {'first_seen': first_seen}}
        bulk_miss.append(action)
        if len(bulk_miss) == 500:
            bulk(refresh_es, bulk_miss)
            bulk_miss = []

    #  Get the remaining ones (fewer than 500) now that the loop has ended
    bulk(refresh_es, bulk_miss)
    logger.warning("{file} import finished at {date}".format(file=args.scanfile, date=datetime.now()))

    # Now we should optimize each index to max num segments of 1 to help with searching/sizing and just over all
    # es happiness
    logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-hosts-umich', date=datetime.now()))
    refresh_es.indices.optimize(index='passive-ssl-hosts-umich', max_num_segments=1, request_timeout=7500)
    logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-certs-umich', date=datetime.now()))
    refresh_es.indices.optimize(index='passive-ssl-certs-umich', max_num_segments=1, request_timeout=7500)
Ejemplo n.º 54
0
class SynergeticPool(Process):
    """Synergetic Process Pool: """

    def __init__(self, synergetic_servers=None, local_workers=1, syn_listener_port=41000):

        # Enable Class Method Pickling
        copy_reg.pickle(types.MethodType, _reduce_method)

        # Enable Descriptor Method Pickling
        copy_reg.pickle(types.MemberDescriptorType, _reduce_method_descriptor)

        # Dictionary of the synergetic servers' IP Addresses or Names with their connections
        self.__syn_servs = dict()

        # List of available Synergetic Processes
        self.__syn_pcss = list()

        # List of incomplete tasks recorded
        self.__incomp_tsks = dict()  # # #  Not in USE for NOW

        # Start the Listener that is expecting incoming synergetic-servers of synergetic-processes
        self.__start_synergetic_listener(listener_port=syn_listener_port)

        # Start the Synergetic-Pool's functionality
        self.__start_pool(synergetic_servers, local_workers_num=local_workers)
        # Start the Synergetic feeder that feeds the remote servers with Tasks
        # self.__start_synergetic_feeder()
        # Start the Synergetic Receiver for getting the results of the remote processes
        # self.__start_synergetic_recver()

    def register_mod(self, mod_list):

        __regstr_mods = dict()

        for mod in mod_list:

            try:
                fobj = open(mod + ".pyc", 'rb')

            except Exception as e:
                print("Synergeticprocessing.Pool--> Module registration failed: %s" % e)
                continue

            try:
                mod_bytecod = fobj.read()

            finally:
                fobj.close()

            # Registering mod.
            __regstr_mods[mod] = mod_bytecod

        for serv, conn in self.__syn_servs.items():

            if conn:

                try:
                    conn.send(('MODULES', __regstr_mods))
                    print 'MSG SND to ', serv

                except Exception as e:
                    raise Exception('Module Registration Error: %s' % e)

                ret = self.__return_queue.get()

                while ret != 'MODULES-READY':
                    self.__return_queue.put(ret)
                    ret = self.__return_queue.get()

                print('Modules Ready @ SynergeticServer: %s' % serv)

    def __start_pool(self, synergetic_servers, local_workers_num):

        # Initialise the Queues
        if synergetic_servers:
            syn_servs_num = len(synergetic_servers)
            self.__task_queue = JoinableQueue()  # syn_servs_num + local_workers_num)
            self.__return_queue = Queue(syn_servs_num + local_workers_num)
            #
            self.__start_local_pool(local_workers_num)
            #
            self.__start_serv_pool(synergetic_servers)
            # Start the Synergetic feeder that feeds the remote servers with Tasks
            self.__start_synergetic_feeder()
            # Start the Synergetic Receiver for getting the results of the remote processes
            self.__start_synergetic_recver()

        else:
            self.__task_queue = JoinableQueue()  # local_workers_num
            self.__return_queue = Queue(local_workers_num)
            #
            self.__start_local_pool(local_workers_num)
        # self.__task_queue = JoinableQueue(syn_servs_num + local_workers_num)
        # self.__return_queue = Queue(syn_servs_num + local_workers_num)
        # #
        # self.__start_local_pool(local_workers_num)
        # #
        # self.__start_serv_pool(synergetic_servers)

    def __start_synergetic_listener(self, listener_port):

        listener = Process(target=self.__synergetic_serv_listener, args=(listener_port,))
        listener.daemon = True
        listener.start()

    def __synergetic_serv_listener(self, listener_port):

        serv = Listener(('', listener_port), authkey='123456')

        while True:

            conn = serv.accept()

            recptn_failed = False
            try:
                server, port, authkey = conn.recv()

            except EOFError:
                recptn_failed = True

            if recptn_failed is False:
                conn.send('WELLCOME')
                conn.close()

    def __start_synergetic_feeder(self):
        feeder = Process(target=self.__feed_remote_syncss)
        feeder.daemon = True
        feeder.start()

    def __feed_remote_syncss(self):

        while True:

            for serv_addr, conn in self.__syn_servs.items():

                if conn:
                    Task = self.__task_queue.get()
                    self.__task_queue.task_done()

                    # Keep Task Until is completed
                    # if Task[0] != 'MODULES':
                    #     (task_id, func, args, kwargs) = Task
                    #     self.__incomp_tsks[serv_addr]= { task_id : (func, args, kwargs) }

                    try:
                        conn.send(Task)

                    except EOFError:
                        self.__syn_servs[serv_addr] = None

    def __start_synergetic_recver(self):
        recver = Process(target=self.__recv_remote_syncss)
        recver.daemon = True
        recver.start()

    def __recv_remote_syncss(self):

        while True:

            for serv_addr, conn in self.__syn_servs.items():

                if conn:

                    try:

                        return_msg = conn.recv()

                    except EOFError:

                        self.__syn_servs[serv_addr] = None

                    else:

                        self.__return_queue.put(return_msg)

                        if serv_addr in self.__incomp_tsks and return_msg != 'MODULES-READY':
                            del self.__incomp_tsks[serv_addr][return_msg[0]]

                # elif self.__incomp_tsks[serv_addr]:
                #     Tasks = self.__incomp_tsks[serv_addr]
                #     for task_id, (func, args, kwargs) in Tasks.items():
                #         print "Incomplete task putting back to Queue"
                #         self.__task_queue.put((task_id, func, args, kwargs))
                #         del self._incomp_tsks[serv_addr][task_id]
                #         self.__task_queue.task_done()

    def __synergetic_serv_connection(self, serv, port, auth):

        try:
            conn = Client((serv, port), authkey=str(auth))
        except:
            conn = None

        return conn

    def __start_local_pool(self, local_worker_num=1):

        for i in range(local_worker_num):
            self.__syn_pcss.append(SynergeticProcess(self.__task_queue, self.__return_queue))

        for syn_p in self.__syn_pcss:
            syn_p.daemonic = True
            syn_p.start()

    def __start_serv_pool(self, synergetic_servers):

        for serv, (port, auth) in synergetic_servers.items():

            print serv, port, auth

            conn = self.__synergetic_serv_connection(serv, port, auth)

            if conn:
                self.__syn_servs[serv] = conn
            else:
                # Maybe this will be DEPRECATED
                self.__syn_servs[serv] = None

    def __dispatch(self, func, *args, **kwargs):

        task_id = str(random.randrange(1, 100000000))
        task = (task_id, func, args, kwargs)
        self.__task_queue.put(task)

        return task_id

    def dispatch(self, func, *args, **kwargs):

        task_id = self.__dispatch(func, *args, **kwargs)

        return ResaultIterator(self.__return_queue, [task_id])

    def imap(self, func, iterable=None, chank=0, callback=None):

        task_ids = list()

        if chank == 0:

            # self.__task_queue = JoinableQueue(len(iterable))
            # self.__return_queue = Queue(len(iterable))

            count = 0

            for itr_item in iterable:
                # task_ids = list()
                # resault = ResaultIterator(self.__return_queue, task_ids)
                # resault._job(self.__dispatch, func, iterable))
                count += 1
                print "dispatching", count
                task_id = self.__dispatch(func, itr_item)
                task_ids.append(task_id)

            # return resault

        else:

            chank_size = len(iterable) / chank

            chank_res_num = len(iterable) % chank

            for i in range(chank):
                start_pntr = i * chank_size
                end_pntr = (i+1) * chank_size
                itr_chank = iterable[start_pntr:end_pntr]
                task_id = self.__dispatch(func, itr_chank)
                task_ids.append(task_id)

            if chank_res_num:
                itr_chank = iterable[end_pntr:(end_pntr + chank_res_num)]
                task_id = self.__dispatch(func, itr_chank)
                task_ids.append(task_id)

        print "Returning"

        return ResaultIterator(self.__return_queue, task_ids)

    def map(self, func, iterable=None, chank=1, callback=None):

        ret = list()

        iter_ret = self.imap(func, iterable, chank, callback)

        for ret_item in iter_ret:
            ret.append(ret_item)

        return ret

    def join_all(self, timeout=None):

        for serv, conn in self.__syn_servs.items():

            if conn:
                conn.close()
                print("Connection to Synergetic-server:%s CLOSED" % serv)

        self.__task_queue.put((None, None, None, None))
        self.__task_queue.task_done()
        self.__task_queue.join()
        self.__return_queue.close()
        self.__return_queue.join_thread()

    @property
    def remote_prcss_num(self):
        return len(self.__syn_pcss)

    @property
    def local_prcss_num(self):
        return len(self.__syn_servs)
Ejemplo n.º 55
0
class QueuedWriter(object):
    """ A queued multiprocess writter.

    """

    def __init__(self, func):
        """ Queue_Writer(func) -> Fill a queue and call func with data as it is
        able to use it.

        """

        self._func = func

        # Queue to fill with data to be written.
        self._data_queue = Queue()

        # Queue to get the return value from the write function.
        self._return_queue = Queue()

        # Create the writer process.
        self._writer_p = Process(target=_queue_writer,
                                 args=(func, self._data_queue,
                                       self._return_queue))
        self._writer_p.start()

    def __repr__(self):
        """ __repr__ -> Returns a python expression to recreate this instance.

        """

        repr_str = "%(_func)s" % self.__dict__

        return '%s(%s)' % (self.__class__.__name__, repr_str)

    def write(self, data):
        """ Send more data down the queue to the processing function.

        """

        self._data_queue.put(data)

        return len(data)

    __call__ = write

    def close(self):
        """ Close the queue and writer process.

        """

        # Send EOF to end the writer process.
        self._data_queue.put('EOF')

        # Close the queue.
        self._data_queue.close()

        # Wait for the queue buffer to empty.
        self._data_queue.join_thread()

        # Get the return value from the writer process.
        ret_val = self._return_queue.get()

        # Close the return queue.
        self._return_queue.close()

        # Wait for the writer process to exit.
        self._writer_p.join()

        # Return the writer result.
        return ret_val

    def __enter__(self):
        """ Provides the ability to use pythons with statement.

        """

        try:
            return self
        except Exception as err:
            print(err)
            return None

    def __exit__(self, exc_type, exc_value, traceback):
        """ Close the file when finished.

        """

        try:
            return self.close() or not bool(exc_type)
        except Exception as err:
            print(err)
            return False
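The `_queue_writer` target used by the class is not shown here; a minimal sketch, assuming it drains the data queue until the 'EOF' marker and then reports how much it handled, could be:

def _queue_writer(func, data_queue, return_queue):
    # Hypothetical helper: call func on each chunk until 'EOF' arrives,
    # then send a summary value back for close() to return.
    written = 0
    for data in iter(data_queue.get, 'EOF'):
        func(data)
        written += 1
    return_queue.put(written)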
Ejemplo n.º 56
0
class Worker:
    """This class is used for poller and reactionner to work.
    The worker is a process launch by theses process and read Message in a Queue
    (self.s) (slave)
    They launch the Check and then send the result in the Queue self.m (master)
    they can die if they do not do anything (param timeout)

    """

    _id = 0  # None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _control_q = None

    def __init__(self, _id, slave_q, returns_queue, processes_by_worker, mortal=True, timeout=300,
                 max_plugins_output_length=8192, target=None, loaded_into='unknown',
                 http_daemon=None):
        self._id = self.__class__._id
        self.__class__._id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.slave_q = None
        self.processes_by_worker = processes_by_worker
        self._control_q = Queue()  # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target = self.work
        self._process = Process(target=target, args=(slave_q, returns_queue, self._control_q))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False
        # Keep a trace where the worker is launch from (poller or reactionner?)
        self.loaded_into = loaded_into
        if os.name != 'nt':
            self.http_daemon = http_daemon
        else:  # windows forker do not like pickle http/lock
            self.http_daemon = None

    def is_mortal(self):
        """
        Accessor to _mortal attribute

        :return: A boolean indicating if the worker is mortal or not.
        :rtype: bool
        """
        return self._mortal

    def start(self):
        """
        Start the worker. Wrapper for calling start method of the process attribute

        :return: None
        """
        self._process.start()

    def terminate(self):
        """
        Wrapper for calling terminate method of the process attribute
        Also close queues (input and output) and terminate queues thread

        :return: None
        """
        # We can just terminate process, not threads
        self._process.terminate()
        # Is we are with a Manager() way
        # there should be not such functions
        if hasattr(self._control_q, 'close'):
            self._control_q.close()
            self._control_q.join_thread()
        if hasattr(self.slave_q, 'close'):
            self.slave_q.close()
            self.slave_q.join_thread()

    def join(self, timeout=None):
        """
         Wrapper for calling join method of the process attribute

        :param timeout: time to wait for the process to terminate
        :type timeout: int
        :return: None
        """
        self._process.join(timeout)

    def is_alive(self):
        """
        Wrapper for calling is_alive method of the process attribute

        :return: A boolean indicating if the process is alive
        :rtype: bool
        """
        return self._process.is_alive()

    def is_killable(self):
        """
        Determine whether a process is killable :

        * process is mortal
        * idletime > timeout

        :return: a boolean indicating if it is killable
        :rtype: bool
        """
        return self._mortal and self._idletime > self._timeout

    def add_idletime(self, time):
        """
        Increment idletime

        :param time: time to increment in seconds
        :type time: int
        :return: None
        """
        self._idletime += time

    def reset_idle(self):
        """
        Reset idletime (set to 0)

        :return: None
        """
        self._idletime = 0

    def send_message(self, msg):
        """
        Wrapper for calling put method of the _control_q attribute

        :param msg: the message to put in queue
        :type msg: str
        :return: None
        """
        self._control_q.put(msg)

    def set_zombie(self):
        """
        Set the process as zombie (mortal to False)

        :return:None
        """
        self._mortal = False

    def get_new_checks(self):
        """
        Get new checks if less than nb_checks_max
        If no new checks got and no check in queue, sleep for 1 sec
        REF: doc/alignak-action-queues.png (3)

        :return: None
        """
        try:
            while len(self.checks) < self.processes_by_worker:
                # print "I", self._id, "wait for a message"
                msg = self.slave_q.get(block=False)
                if msg is not None:
                    self.checks.append(msg.get_data())
                # print "I", self._id, "I've got a message!"
        except Empty, exp:
            if len(self.checks) == 0:
                self._idletime += 1
                time.sleep(1)
        # Maybe the Queue() is not available, if so, just return
        # get back to work :)
        except IOError, exp:
            return
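For context, a reduced sketch of the work loop that `get_new_checks()` is meant to serve is shown below; this is only an illustration of how the control queue and the check queue fit together, not the real Shinken/Alignak implementation, and the 'die' control message is an assumption.

    def work(self, slave_q, returns_queue, control_q):
        # Illustrative only: watch the control queue for a 'die' order,
        # otherwise keep pulling new checks to execute.
        self.checks = []
        self.slave_q = slave_q
        while not self.i_am_dying:
            try:
                if control_q.get(block=False) == 'die':
                    break
            except Empty:
                pass
            self.get_new_checks()
            # ... launch self.checks and put the results on returns_queue here ...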
Ejemplo n.º 57
0
class Sentinel(object):
    def __init__(self, stop_event, start_event, list_key=Conf.Q_LIST, timeout=Conf.TIMEOUT, start=True):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.parent_pid = os.getppid()
        self.name = current_process().name
        self.list_key = list_key
        self.r = redis_client
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = Queue()
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = Process()
        self.pusher = Process()
        if start:
            self.start()

    def start(self):
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.qsize() == 0 and self.task_queue.qsize() == 0:
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        # This is just for PyCharm to not crash. Ignore it.
        if not hasattr(sys.stdin, 'close'):
            def dummy_close():
                pass

            sys.stdin.close = dummy_close
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out, self.list_key, self.r)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue, Value('f', -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process
        """
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warn(_("reincarnated worker {} after timeout").format(process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(_("reincarnated worker {} after death").format(process.name))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        # spawn worker pool
        for i in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(_('{} guarding cluster at {}').format(current_process().name, self.pid))
        self.start_event.set()
        Stat(self).save()
        logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
        scheduler(list_key=self.list_key)
        counter = 0
        cycle = 0.5  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                # Are you alive?
                if not p.is_alive() or (self.timeout and p.timer.value == 0):
                    self.reincarnate(p)
                    continue
                # Decrement timer if work is being done
                if self.timeout and p.timer.value > 0:
                    p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call scheduler once a minute (or so)
            counter += cycle
            if counter == 30:
                counter = 0
                scheduler(list_key=self.list_key)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info('{} stopping cluster processes'.format(name))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for _ in range(len(self.pool)):
            self.task_queue.put('STOP')
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put('STOP')
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info('{} waiting for the monitor.'.format(name))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
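The `worker`, `pusher` and `monitor` targets belong to the surrounding cluster module and are not shown; purely to illustrate the 'STOP' poison-pill handshake that `stop()` relies on, a reduced worker loop might be:

def worker(task_queue, result_queue, timer, timeout=None):
    # Reduced sketch (not the real implementation): exit as soon as the
    # 'STOP' poison pill is read, passing everything else to the monitor.
    for task in iter(task_queue.get, 'STOP'):
        result_queue.put(task)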
Ejemplo n.º 58
0
class RecordTest:
    """A context manager that records a test to the database.
    Can also be invoked with record=False as a noop."""
    def __init__(self, record=True, reinstall_on_failure=True,
                 description=None, build=None, dut=None,
                 stdout_filter=None, result_id=None,
                 record_finish=True, job_id=None):
        self.record = record
        self.description = description
        self.reinstall_on_failure = reinstall_on_failure
        self.build = build
        self.dut = dut
        self.result_id = result_id
        self.mdb = self.dut_id = None
        self.stdout_filter = stdout_filter
        self.record_queue = self.stream_process = None
        self.record_finish = record_finish
        self.failed = False
        self.job_id = job_id
    def __enter__(self):
        """Start recording a test"""
        try:
            run(['logger', 'BVT', 'starting', self.full_description()], 
                host=self.dut, timeout=10)
        except SubprocessError:
            print 'INFO: unable to mark test log'
        if not self.record:
            return self
        if self.result_id is None:
            self.mdb = get_autotest()
            terms = {'test_case':self.description or 'to be determined',
                     'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],
                     'control_pid' : getpid(), 'start_time' : time(),
                     'development_mode' : 0,
                     'command_line':abbreviate(' '.join(sys.argv))}
            if self.dut:
                dutdoc = self.mdb.duts.find_one({'name':self.dut})
                self.dut_id = terms['dut'] = dutdoc['_id']
                terms['dut_name'] = dutdoc['name']
                if 'development_mode' in dutdoc:
                    terms['development_mode'] = dutdoc['development_mode']
            self.result_id = self.mdb.results.save(terms)
            if self.job_id is not None:
                self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})
            if self.build is None and self.dut:
                self.build = get_build(self.dut, timeout=10)
            self.mdb.results.update({'_id':self.result_id}, 
                                    {'$set':{'build':self.build}})
            if self.dut:
                self.mdb.duts.update({'_id':terms['dut']}, {'$set': {
                            'build':self.build,
                            'control_command_line': abbreviate(' '.join(sys.argv)),
                            'result_id' : self.result_id}})
        if self.stdout_filter:
            self.record_queue = Queue()
            self.stream_process = Process(
                target=service_queue, 
                args=[self.record_queue, self.result_id, 
                      self.dut, self.dut_id])
            self.stream_process.start()
            self.stdout_filter.add_callback(self, 
                                            lambda *x: self.record_queue.put(x))

        if self.description:
            print 'HEADLINE: starting', self.full_description()
        get_track().updates.save({'result_id':self.result_id,
                                  'action':'new result record'})
        return self
    def __exit__(self, _type, value, traceback):
        """Finish a test, recording the exception as a test failure,
        or regarding a test as passed if there is no exception."""
        try:
            if not self.record_finish:
                return
            print >>sys.stderr, 'record', self.record
            dutset = {'last_finish_time':time()}
            if not self.record:
                return
            upd = {'end_time': time(), 'modification_time':time()}

            if value: # i.e. , if test failed:
                upd['failure'] = repr(value)
                upd['exception'] = value.__class__.__name__
                if not isinstance(value, KeyboardInterrupt):
                    print 'HEADLINE: exception', upd['exception'], value
                    for clause in format_exception(_type, value, traceback):
                        for line in clause.split('\n'):
                            print 'CRASH:', line
                else:
                    upd['infrastructure_problem'] = True
                    upd['whiteboard'] = '[infrastructure] test interrupted'
                if self.reinstall_on_failure:
                    dutset['test_failed'] = True
                    tnext = time() + 300
                    print 'INFO: test failed, so will reinstall machine at', \
                        asctime(localtime(tnext))

            if self.failed: #some test suite failed
                upd['failure'] = 'test failed'

            self.mdb.results.update({'_id':self.result_id}, {'$set':upd})
            classify = process_result(self.mdb.results.find_one({'_id':self.result_id}))
            print 'HEADLINE:', classify, self.full_description()

            get_track().updates.save({'result_id':self.result_id,
                                      'action':'experiment finished'})

            if self.dut_id:
                self.mdb.duts.update({'_id':self.dut_id}, 
                                     {'$unset': {'control_pid':1, 'result_id':1,
                                                 'control_command_line':1},
                                      '$set': dutset})
            if self.build:
                recount(self.build)
            if classify == 'infrastructure_problems':
                pass
            else:
                col = 'green' if classify == 'passes' else 'red'
        finally:
            if self.record_queue:
                self.record_queue.put('finish')
                self.record_queue.close()
                self.record_queue.join_thread()
            if self.stream_process:
                self.stream_process.join()
            if self.stdout_filter:
                self.stdout_filter.del_callback(self)
            
    def full_description(self):
        """Work out a full test description"""
        des = describe_dut(self.dut) if self.dut else ''
        if self.build:
            des += ' with ' + self.build
        if self.result_id:
            des += ' BVT result ID ' + str(self.result_id)
        return (self.description if self.description 
                else 'unknown test') + ' on ' +  des
    def set_description(self, description):
        """Set description (sometimes this is not known at the start
        of the test"""
        self.description = description
        if not self.record:
            return
        self.mdb.results.update({'_id':self.result_id}, 
                                 {'$set':{'test_case':description}})
    def set_build(self, build):
        """Set build (sometimes this is not known at the start of the test"""
        self.build = build
        if not self.record:
            return
        self.mdb.results.update({'_id':self.result_id}, 
                                {'$set':{'build':build}})


    # Update the time, result, and failure reason for a step in a test suite.
    def update_step(self, suite, i, result, reason=''):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'step'+str(i):result, 'step%s-reason'%str(i):reason,
                                                'step%s-end'%str(i):time()}})

    # Update the end time, result, and failure reason for a test suite
    def update_suite(self, suite, result, reason=''):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'result':result, 'finish_time':time(), 'reason':reason}})

    # Allocate the initial doc in the suiteresults collection in mongo for this run.
    def gen_suite_log(self, suite, num_steps):
        self.mdb.suiteresults.save({'result_id':self.result_id, 'suite':suite, 'steps':num_steps, 'start_time':time()})   
    
    # Indicate the start time for one step in a test suite
    def step_start(self, suite, i):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'step%s-start'%str(i):time()}})