Example #1
def export_table(host, port, auth_key, db, table, directory, fields, delimiter, format,
                 error_queue, progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter, task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                         table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        # use str(ex): Python 3 exceptions have no .message attribute
        error_queue.put((RuntimeError, RuntimeError(str(ex)), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
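Here the writer runs as a separate process and is shut down by pushing a StopIteration instance onto the SimpleQueue as a sentinel (see the finally block). A minimal, self-contained sketch of that handshake, assuming nothing about the rethinkdb helpers (launch_writer, rdb_call_wrapper) that are not shown:

from multiprocessing import Process, SimpleQueue

def writer_loop(task_queue):
    # Drain the queue until the StopIteration sentinel arrives.
    while True:
        item = task_queue.get()
        if isinstance(item, StopIteration):
            break
        print("writing row:", item)

if __name__ == "__main__":
    q = SimpleQueue()
    writer = Process(target=writer_loop, args=(q,))
    writer.start()
    for row in ({"id": 1}, {"id": 2}):
        q.put(row)
    q.put(StopIteration())  # same shutdown handshake as the finally block above
    writer.join()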
Example #2
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish ipc queues using a manager process
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
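run() wires a producer process, a pool of workers and an in-process result consumer together with two SimpleQueues, using None as the shutdown sentinel. A condensed, runnable sketch of the same layout; the squaring worker is a hypothetical stand-in for the render callable:

from multiprocessing import Process, SimpleQueue

def producer(tasks, task_queue):
    for t in tasks:
        task_queue.put(t)

def worker(task_queue, result_queue):
    while True:
        task = task_queue.get()
        if task is None:        # shutdown sentinel
            break
        result_queue.put(task * task)

if __name__ == "__main__":
    tasks = list(range(8))
    task_q, result_q = SimpleQueue(), SimpleQueue()
    Process(target=producer, args=(tasks, task_q)).start()
    workers = [Process(target=worker, args=(task_q, result_q)) for _ in range(2)]
    for w in workers:
        w.start()
    results = [result_q.get() for _ in tasks]   # one result per task
    for _ in workers:
        task_q.put(None)                        # shut the workers down
    for w in workers:
        w.join()
    print(sorted(results))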
Example #3
async def data_from_file(main2gvf: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')
    for i, item in enumerate(data):
        # if i > 500:
        #     break
        item[-1] = coder(x1=item[-1], x2=item[-2])
        main2gvf.put(item)
Example #4
def merge_db(db_folder, new_db_name, db_to_merge):

    assert path.exists(db_folder), '`{}` is a wrong path to db folder, please correct it.'.format(db_folder)

    shutdown = Event()
    writer_queue = SimpleQueue()

    writer = Writer(db_folder=db_folder, db_name=new_db_name, queue=writer_queue, shutdown=shutdown)
    reader = Reader(db_folder=db_folder, db_to_merge=db_to_merge,
                    queue=writer_queue, shutdown=shutdown)

    reader.start()
    writer.start()

    pbar = tqdm(total=len(db_to_merge))

    c = 0
    while not shutdown.is_set():
        try:
            new_c = writer.counter.value
            progress = new_c - c
            if progress > 0:
                pbar.update(progress)
                c = new_c
            Event().wait(2)  # sleep ~2 s between progress polls without a busy loop

        except KeyboardInterrupt:
            print()
            print("Main thread grab the keyboard interrupt")
            break

    shutdown.set()
    pbar.close()
    # writer.join()
    # reader.join()

    print("writer alive", writer.is_alive())
    print("reader alive", reader.is_alive())

    if writer.is_alive():

        print("Waiting writer...")
        writer.join()

    print("WRITER EXECUTED")

    if reader.is_alive():
        print("Waiting reader...")
        writer_queue.get()
        print("Waiting reader 2...")
        reader.join()

    print("READER EXECUTED")

    print("Done.")
Example #5
def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target
        )
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support

        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2],
                "".join(traceback.format_tb(result.args[3])),
            )
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
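fork_process ships either the target's return value or the exception it raised back to the parent over a SimpleQueue and re-raises it there; tblib is imported so tracebacks survive pickling. A stripped-down sketch of the same round trip, with the traceback preservation omitted:

from multiprocessing import Process, SimpleQueue

def _run(target, q, *args, **kwargs):
    try:
        q.put((target(*args, **kwargs), None))
    except Exception as ex:         # forward any failure to the parent
        q.put((None, ex))

def fork_call(target, *args, **kwargs):
    q = SimpleQueue()
    p = Process(target=_run, args=(target, q) + args, kwargs=kwargs)
    p.start()
    result, error = q.get()         # read before join to avoid blocking
    p.join()
    if error is not None:
        raise error                 # re-raised without the child traceback
    return p.exitcode, result

if __name__ == "__main__":
    print(fork_call(pow, 2, 10))    # (0, 1024)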
Example #6
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #7
async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)

    time.sleep(0.1)
    while not gvf2plot.empty():
        time.sleep(0.1)
Example #8
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    # key on the block id; 'b' may be unbound on early iterations
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Example #9
	def __init__(self, callback, pool_size=1, check_intervall=2):
		self.task_queue = SimpleQueue()
		self.result_queue = SimpleQueue()
		self._callback = callback
		self._pool = {}  # {process_name: process}
		self._tasks = {}  # {task_id: process_name}
		for _ in range(pool_size):
			process = self.Process(self.task_queue, self.result_queue)
			self._pool[process.name] = process
			process.start()
		# Check for progress periodically TODO: stop timer when queue is empty!
		self.timer = QTimer()
		self.timer.timeout.connect(self._check_for_results)
		self.timer.start(check_intervall * 1000)
Example #10
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):
    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')

        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Example #11
 def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
     self.db_file = db_file
     self.connection = sqlite3.connect(self.db_file)
     self.broker_cursor = self.connection.cursor()
     self.broker_queue = SimpleQueue()
     self.broker = None
     self.lock_wait_time = lock_wait_time
Example #12
    def _open_frontend(self):
        from multiprocessing import Process, SimpleQueue

        connection = SimpleQueue()
        frontend = Process(
            target=self._open_frontend_process,
            args=(connection, [k for k in sys.argv[1:] if k != "--frontend"]))
        frontend.start()
        cmdline = connection.get()
        frontend.join()
        if self.interactive:
            argv_backup = list(sys.argv)
        sys.argv[1:] = cmdline.split()
        Main.setup_argv(True, True)
        if self.interactive:
            sys.argv = argv_backup
        print("Running with the following command line: %s" % sys.argv)
Example #13
def start(parsed_args):
    from multiprocessing import Process, SimpleQueue

    processes = []
    msg_queue = SimpleQueue()
    word_count_queue = SimpleQueue()
    unique_words_queue = SimpleQueue()
    median_queue = SimpleQueue()

    # Prep workers to read from msg queue and write to other queues
    for i in range(workers):
        p = Process(target=worker,
                      args=(msg_queue, unique_words_queue, word_count_queue))
        processes.append(p)
        p.start()

    # Prep a process to accumulate word_count_queue for ft1.txt
    p = Process(target=accumulator,
                  args=(word_count_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Prep a process to re-sequence unique words counted
    p = Process(target=buffered_resequener,
                  args=(unique_words_queue, median_queue))
    processes.append(p)
    p.start()

    # Prep a process to keep a running median of unique words for ft2.txt
    p = Process(target=running_median,
                  args=(median_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Start reading msgs for the msg_queue
    ingest(parsed_args.file, msg_queue)

    # Sending an indication to stop, one for each worker
    for i in range(workers):
        msg_queue.put(None)

    # This step gathers the child processes, but may be unnecessary
    for p in processes:
        p.join()
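start() chains several queue-fed processes and shuts them down by forwarding None sentinels. A minimal sketch of such a pipeline (ingest, workers, accumulator); the word-count logic is a hypothetical stand-in for the real workers:

from multiprocessing import Process, SimpleQueue

def worker(msg_q, count_q):
    while True:
        msg = msg_q.get()
        if msg is None:
            count_q.put(None)            # pass the sentinel downstream
            break
        count_q.put(len(msg.split()))    # per-message word count

def accumulator(count_q, n_workers):
    total, done = 0, 0
    while done < n_workers:              # wait for one sentinel per worker
        item = count_q.get()
        if item is None:
            done += 1
        else:
            total += item
    print("total words:", total)

if __name__ == "__main__":
    workers = 2
    msg_q, count_q = SimpleQueue(), SimpleQueue()
    procs = [Process(target=worker, args=(msg_q, count_q)) for _ in range(workers)]
    procs.append(Process(target=accumulator, args=(count_q, workers)))
    for p in procs:
        p.start()
    for line in ("the quick brown fox", "jumps over the lazy dog"):
        msg_q.put(line)
    for _ in range(workers):
        msg_q.put(None)                  # one sentinel per worker
    for p in procs:
        p.join()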
Example #14
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        data = gvf2plot.get()

        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Example #15
    def __init__(self, server, nickname, user, host='localhost'):
        self.server = server
        self.nickname = nickname
        self.realname = nickname
        self.user = user
        self.host = host

        self._readbuffer = ""
        self._writebuffer = ""
        self.request_queue = SimpleQueue()
        self.response_queue = SimpleQueue()

        # dict of board => list of users
        self.board_watchers = defaultdict(list)

        # dict of board, thread => list of users
        self.thread_watchers = defaultdict(lambda: defaultdict(list))

        Process(
            target=Ami,
            name='immediate api worker',
            args=(self.request_queue, self.response_queue)
        ).start()
Example #16
File: ami.py Project: ATRAN2/Futami
    def __init__(self, request_queue, response_queue):
        self.request_queue = request_queue
        self.response_queue = response_queue
        self.update_request_queue = SimpleQueue()

        Process(
            target=self.update_loop,
            name='periodic api worker',
            args=(response_queue, self.update_request_queue),
        ).start()

        logger.debug("initialization complete")

        self.request_loop()
Example #17
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #18
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}
Example #19
    def __init__(self,username,password,debug=False):
        """
            GMail SMTP connection worker

            username    : GMail username 
                          This can either be a simple address ('*****@*****.**') 
                          or can include a name ('"A User" <*****@*****.**>').
                          
                          The username specified is used as the sender address

            password    : GMail password
            debug       : Debug flag (passed to smtplib)

            Runs '_gmail_worker' helper in background using multiprocessing
            module.

            '_gmail_worker' loops listening for new message objects on the
            shared queue and sends these using the GMail SMTP connection.
        """
        self.queue = SimpleQueue()
        self.worker = Process(target=_gmail_worker,args=(username,password,self.queue,debug))
        self.worker.start()
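The GMail helper keeps one long-lived worker process that drains messages from a SimpleQueue. A minimal sketch of that background-sender pattern, with the SMTP details replaced by a hypothetical send_one():

from multiprocessing import Process, SimpleQueue

def send_one(msg):
    print("sending:", msg)                # stand-in for the smtplib calls

def mail_worker(queue):
    while True:
        msg = queue.get()
        if msg is None:                   # close() sentinel
            break
        send_one(msg)

class MailWorker:
    def __init__(self):
        self.queue = SimpleQueue()
        self.worker = Process(target=mail_worker, args=(self.queue,))
        self.worker.start()

    def send(self, msg):
        self.queue.put(msg)

    def close(self):
        self.queue.put(None)
        self.worker.join()

if __name__ == "__main__":
    mw = MailWorker()
    mw.send("hello")
    mw.close()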
Example #20
    def __init__(self, data_structure, processes, scan_function, init_args,
                 _mp_init_function):
        """ Init the scanner.

        data_structure is a world.DataSet
        processes is the number of child processes to use
        scan_function is the function to use for scanning
        init_args are the arguments passed to the init function
        _mp_init_function is the function used to init the child processes
        """
        assert(isinstance(data_structure, world.DataSet))
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes,
                initializer=_mp_init_function,
                initargs=(init_args,))

        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None
Example #21
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Example #22
    assert config["action"] == args.action

    procs = []

    if args.action in ["training", "inference"]:

        if (args.action == "inference" and
            "*" in config["model_path"] and
            "--model_path" not in more_args):
            more_args += glob_to_more_args(config["model_path"], "model_path")

        configurations = make_configs_from(config, more_args)

        target = train if args.action == "training" else run_inference

        gpu_queue = SimpleQueue()
        for idx in get_gpus():
            gpu_queue.put(str(idx))


        try:
            #blockPrint()
            for c in configurations:

                #if any(t in c["model_path"] for t in []):
                while gpu_queue.empty():
                    sleep(10)

                p = Process(target=target, args=(c, gpu_queue))
                procs.append(p)
                p.start()

if __name__ == "__main__":

    COUNTER = Value('i', 0)

    while True:
        HTML_NUM = int(input("\nanimenylon_pl page number: "))
        if HTML_NUM <= 0:
            break
        start_time = time.time()
        directory = DESKTOP + "dev1_animenylon_pl0%d\\" % HTML_NUM

        lst = list()
        process_list = list()
        que = SimpleQueue()
        pool = Pool(processes=POOL_NUM)

        page_lst = get_image_page(HTML_PAGE % HTML_NUM)
        page_num = len(page_lst)
        print("Get image page %d, analyzing..." % page_num)

        for url in page_lst:
            res = pool.apply_async(get_image_link, (url, ))
            res.wait()
            image_url = res.get()
            if image_url is None:
                continue
            que.put(image_url)
            pro = Process(target=create_image, args=(que, directory, COUNTER))
            process_list.append(pro)
Example #24
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)

    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = [ ]

    try:
        sizes = get_all_table_sizes(options["host"], options["port"], options["auth_key"], db_table_set)

        progress_info = []

        arg_lists = []
        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
                                  multiprocessing.Value(ctypes.c_longlong, sizes[(db, table)])))
            arg_lists.append((options["host"],
                              options["port"],
                              options["auth_key"],
                              db, table,
                              options["directory_partial"],
                              options["fields"],
                              options["delimiter"],
                              options["format"],
                              error_queue,
                              progress_info[-1],
                              sindex_counter,
                              exit_event))


        # Wait for all tables to finish
        while len(processes) > 0 or len(arg_lists) > 0:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set() # Stop rather immediately if an error occurs
                errors.append(error_queue.get())

            processes = [process for process in processes if process.is_alive()]

            if len(processes) < options["clients"] and len(arg_lists) > 0:
                processes.append(multiprocessing.Process(target=export_table,
                                                         args=arg_lists.pop(0)))
                processes[-1].start()

            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        print("")
        print("%s exported from %s, with %s" %
              (plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
               plural(len(db_table_set), "table", "tables"),
               plural(sindex_counter.value, "secondary index", "secondary indexes")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Example #25
class SafeQueue(object):
    """
    Many writers Single Reader multiprocessing safe Queue
    """
    __thread_pool = SingletonThreadPool()

    def __init__(self, *args, **kwargs):
        self._reader_thread = None
        self._q = SimpleQueue(*args, **kwargs)
        # Fix the simple queue write so it uses a single OS write, making it atomic message passing
        # noinspection PyBroadException
        try:
            self._q._writer._send_bytes = partial(
                SafeQueue._pipe_override_send_bytes, self._q._writer)
        except Exception:
            pass
        self._internal_q = None
        self._q_size = 0

    def empty(self):
        return self._q.empty() and (not self._internal_q
                                    or self._internal_q.empty())

    def is_pending(self):
        # check if we have pending requests to be pushed (it does not mean they were pulled)
        # only call from main put process
        return self._q_size > 0

    def close(self, event):
        # wait until all pending requests pushed
        while self.is_pending():
            if event:
                event.set()
            sleep(0.1)

    def get(self, *args, **kwargs):
        return self._get_internal_queue(*args, **kwargs)

    def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
        buffer = []
        timeout_count = int(timeout / throttle_sleep)
        empty_count = timeout_count
        while len(buffer) < max_items:
            while not self.empty() and len(buffer) < max_items:
                try:
                    buffer.append(self._get_internal_queue(block=False))
                    empty_count = 0
                except Empty:
                    break
            empty_count += 1
            if empty_count > timeout_count or len(buffer) >= max_items:
                break
            sleep(throttle_sleep)
        return buffer

    def put(self, obj):
        # GIL will make sure it is atomic
        self._q_size += 1
        # make sure the block put is done in the thread pool i.e. in the background
        obj = pickle.dumps(obj)
        self.__thread_pool.get().apply_async(self._q_put, args=(obj, ))

    def _q_put(self, obj):
        self._q.put(obj)
        # GIL will make sure it is atomic
        self._q_size -= 1

    def _get_internal_queue(self, *args, **kwargs):
        if not self._internal_q:
            self._internal_q = TrQueue()
        if not self._reader_thread:
            self._reader_thread = Thread(target=self._reader_daemon)
            self._reader_thread.daemon = True
            self._reader_thread.start()
        obj = self._internal_q.get(*args, **kwargs)
        # deserialize
        return pickle.loads(obj)

    def _reader_daemon(self):
        # pull from process queue and push into thread queue
        while True:
            # noinspection PyBroadException
            try:
                obj = self._q.get()
                if obj is None:
                    break
            except Exception:
                break
            self._internal_q.put(obj)

    @staticmethod
    def _pipe_override_send_bytes(self, buf):
        n = len(buf)
        # For wire compatibility with 3.2 and lower
        header = struct.pack("!i", n)
        # Issue #20540: concatenate before sending, to avoid delays due
        # to Nagle's algorithm on a TCP socket.
        # Also note we want to avoid sending a 0-length buffer separately,
        # to avoid "broken pipe" errors if the other end closed the pipe.
        self._send(header + buf)
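The key trick in SafeQueue._get_internal_queue/_reader_daemon is a background thread that drains the process-level SimpleQueue into a thread-level queue.Queue, giving consumers the block/timeout semantics that SimpleQueue.get() lacks. A minimal sketch of that bridge, not the clearml implementation:

import queue
import threading
from multiprocessing import SimpleQueue

class BridgedQueue:
    def __init__(self):
        self._mp_q = SimpleQueue()
        self._thread_q = queue.Queue()
        threading.Thread(target=self._reader, daemon=True).start()

    def _reader(self):
        # Pull from the process queue and push into the thread queue.
        while True:
            obj = self._mp_q.get()
            if obj is None:
                break
            self._thread_q.put(obj)

    def put(self, obj):
        self._mp_q.put(obj)

    def get(self, block=True, timeout=None):
        return self._thread_q.get(block=block, timeout=timeout)

if __name__ == "__main__":
    q = BridgedQueue()
    q.put("hello")
    print(q.get(timeout=1.0))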
Example #26
    def __init__(self, *args, **kwargs):
        self._q = SimpleQueue(*args, **kwargs)
Example #27
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
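This class mirrors the standard concurrent.futures.ProcessPoolExecutor, where the SimpleQueue carries worker results and the None values pushed in submit()/shutdown() are only wake-up tokens for the queue management thread. Typical use of the executor looks like this:

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as pool:
        print(list(pool.map(square, range(5))))   # [0, 1, 4, 9, 16]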
Example #28
def digest(
    input_fastq: Tuple,
    restriction_enzyme: str,
    mode: str = "pe",
    output_file: os.PathLike = "out.fastq.gz",
    minimum_slice_length: int = 18,
    compression_level: int = 5,
    n_cores: int = 1,
    read_buffer: int = 100000,
    stats_prefix: os.PathLike = "",
    keep_cutsite: bool = False,
    sample_name: str = "",
):
    """
    Performs in silico digestion of one or a pair of fastq files. 

    \f
    Args:
     input_fastq (Tuple): Input fastq files to process
     restriction_enzyme (str): Restriction enzyme name or site to use for digestion.
     mode (str, optional): Digest combined(flashed) or non-combined(pe). 
                           Undigested pe reads are output but flashed are not written. Defaults to "pe".
     output_file (os.PathLike, optional): Output fastq file path. Defaults to "out.fastq.gz".
     minimum_slice_length (int, optional): Minimum allowed length for in silico digested reads. Defaults to 18.
     compression_level (int, optional): Compression level for gzip output (1-9). Defaults to 5.
     n_cores (int, optional): Number of digestion processes to use. Defaults to 1.
     read_buffer (int, optional): Number of reads to process before writing to file. Defaults to 100000.
     stats_prefix (os.PathLike, optional): Output prefix for stats file. Defaults to "".
     keep_cutsite (bool, optional): Determines if cutsite is removed from the output. Defaults to False.
     sample_name (str, optional): Name of sample processed eg. DOX-treated_1. Defaults to ''.
    """

    # Set up multiprocessing variables
    inputq = SimpleQueue()  # reads are placed into this queue for processing
    writeq = SimpleQueue()  # digested reads are placed into the queue for writing
    statq = Queue()  # stats queue

    cut_site = get_re_site(restriction_enzyme)

    # Checks the submode to see in which mode to run
    if mode == "flashed":

        # If flashed reads, more confident in presence of rf junction.
        # Will not allow undigested reads in this case as probably junk.
        reader = FastqReaderProcess(
            input_files=input_fastq,
            outq=inputq,
            n_subprocesses=n_cores,
            read_buffer=read_buffer,
        )

        digestion_processes = [
            ReadDigestionProcess(
                inq=inputq,
                outq=writeq,
                cutsite=cut_site,
                min_slice_length=minimum_slice_length,
                read_type=mode,
                allow_undigested=False,  # Prevents outputting undigested reads
                statq=statq,
            ) for _ in range(n_cores)
        ]

    elif mode == "pe":

        reader = FastqReaderProcess(
            input_files=input_fastq,
            outq=inputq,
            n_subprocesses=n_cores,
            read_buffer=read_buffer,
        )

        digestion_processes = [
            ReadDigestionProcess(
                inq=inputq,
                outq=writeq,
                cutsite=cut_site,
                min_slice_length=minimum_slice_length,
                read_type=mode,
                allow_undigested=True,
                statq=statq,
            ) for _ in range(n_cores)
        ]

    # Writer process is common to both
    writer = FastqWriterProcess(
        inq=writeq,
        output=output_file,
        n_subprocesses=n_cores,
        compression_level=compression_level,
    )

    # Start all processes
    processes = [writer, reader, *digestion_processes]

    for proc in processes:
        proc.start()

    reader.join()
    writer.join()

    # Collate stats
    print("")
    print("Collating stats")
    collated_stats = DigestionStatCollector(statq,
                                            n_cores).get_collated_stats()

    stats = [
        DigestionStatistics(
            sample=sample_name,
            read_type=mode,
            read_number=read_number,
            slices_unfiltered=stats["unfiltered"],
            slices_filtered=stats["filtered"],
        ) for read_number, stats in collated_stats.items()
    ]

    # Collate stats from digestion of 2+ files by accumulating onto the first entry
    digestion_stats = stats[0]
    for stat in stats[1:]:
        digestion_stats = digestion_stats + stat

    digestion_stats.unfiltered_histogram.to_csv(
        f"{stats_prefix}.digestion.unfiltered.histogram.csv", index=False)
    digestion_stats.filtered_histogram.to_csv(
        f"{stats_prefix}.digestion.filtered.histogram.csv", index=False)
    digestion_stats.slice_summary.to_csv(
        f"{stats_prefix}.digestion.slice.summary.csv", index=False)
    digestion_stats.read_summary.to_csv(
        f"{stats_prefix}.digestion.read.summary.csv", index=False)

    print(digestion_stats.read_summary)
Example #29
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # hi = [1.7, 12, 300, 12, 90]
    # lo = [-1.7, 0, -300, 0, -10]
    # observation = np.random.random(5)
    # for i in range(observation.size):
    #     observation[i] = observation[i] * (hi[i] - lo[i]) + lo[i]
    # observation = list(observation)

    store_data = []

    try:
        for _ in range(5000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            main2gvf.put(gvf_data)

            # record data for later
            store_data.append(gvf_data)

            # send action to servos
            instructions = [goal_instruction(a) for a in action]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Example #30
    def __iter__(self):
        def sample_generator(generator, data_queue, count, tid):
            if self.seed is not None:
                random.seed(self.seed + tid)
                np.random.seed(self.seed + tid)

            idx_ls = list(range(len(generator)))
            if self.shuffle:
                random.shuffle(idx_ls)

            for i in idx_ls:
                if i % self.num_worker != tid:
                    continue

                while count.value >= self.maxsize > 0:
                    time.sleep(0.02)
                    continue

                data_queue.put(generator[i])
                with count.get_lock():
                    count.value += 1

            data_queue.put(StopGenerator(pid=tid))
            with count.get_lock():
                count.value += 1

        data_queue = SimpleQueue()
        count = Value('i', 0)

        process_map = dict()
        for tid in range(self.num_worker):
            process = Process(target=sample_generator,
                              args=(self.generator, data_queue, count, tid))
            process.daemon = True
            process.start()
            process_map[tid] = process

        def single_generator():
            while len(process_map) > 0:
                item = data_queue.get()
                with count.get_lock():
                    count.value -= 1

                if isinstance(item, StopGenerator):
                    del process_map[item.pid]
                    continue

                yield item

        def parallel_generator():
            result = []
            while len(process_map) > 0:
                item = data_queue.get()
                with count.get_lock():
                    count.value -= 1

                if isinstance(item, StopGenerator):
                    del process_map[item.pid]
                    continue

                result.append(item)
                if len(result) >= self.batch_size:
                    if self.collate_fn is not None:
                        result = self.collate_fn(result)

                    yield result
                    result = []

        return parallel_generator() if self.batch_size else single_generator()
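Each worker pushes its items followed by a per-worker StopGenerator sentinel, and the consumer stops once every sentinel has been seen. A condensed, runnable sketch of that scheme; the shared-counter backpressure is omitted and StopGenerator is redefined here for illustration:

from multiprocessing import Process, SimpleQueue

class StopGenerator:
    def __init__(self, pid):
        self.pid = pid

def sample_generator(data, data_queue, tid, num_worker):
    for i, item in enumerate(data):
        if i % num_worker == tid:        # round-robin split of the dataset
            data_queue.put(item)
    data_queue.put(StopGenerator(pid=tid))

if __name__ == "__main__":
    num_worker, data = 2, list(range(10))
    data_queue = SimpleQueue()
    procs = {tid: Process(target=sample_generator,
                          args=(data, data_queue, tid, num_worker), daemon=True)
             for tid in range(num_worker)}
    for p in procs.values():
        p.start()
    while procs:                         # drain until every sentinel has arrived
        item = data_queue.get()
        if isinstance(item, StopGenerator):
            del procs[item.pid]
            continue
        print(item)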
Example #31
def split(
    input_files: Tuple,
    method: str = "unix",
    output_prefix: os.PathLike = "split",
    compression_level: int = 5,
    n_reads: int = 1000000,
    gzip: bool = True,
):
    """ 
    Splits fastq file(s) into equal chunks of n reads.

    \f
    Args:
     input_files (Tuple): Input fastq files to process.
     method (str, optional): Python or unix method (faster but not guaranteed to maintain read pairings) to split the fastq files. Defaults to "unix".
     output_prefix (os.PathLike, optional): Output prefix for split fastq files. Defaults to "split".
     compression_level (int, optional): Compression level for gzipped output. Defaults to 5.
     n_reads (int, optional): Number of reads to split the input fastq files into. Defaults to 1000000.
     gzip (bool, optional): Gzip compress output files if True. Defaults to True.
    
    """

    from ccanalyser.tools.io import (
        FastqReaderProcess,
        FastqWriterSplitterProcess,
        FastqReadFormatterProcess,
    )

    if method == "python":
        readq = SimpleQueue()
        writeq = SimpleQueue()

        paired = True if len(input_files) > 1 else False

        reader = FastqReaderProcess(
            input_files=input_files,
            outq=readq,
            read_buffer=n_reads,
            n_subprocesses=1,
        )

        formatter = [
            FastqReadFormatterProcess(inq=readq, outq=writeq) for _ in range(1)
        ]

        writer = FastqWriterSplitterProcess(
            inq=writeq,
            output_prefix=output_prefix,
            paired_output=paired,
            n_subprocesses=1,
            gzip=gzip,
            compression_level=compression_level,
        )

        processes = [writer, reader, *formatter]

        for proc in processes:
            proc.start()

        for proc in processes:
            proc.join()
            proc.terminate()

    elif method == "unix":  # Using unix split to perform the splitting

        tasks = []
        for ii, fn in enumerate(input_files):
            t = delayed(run_unix_split)(fn,
                                        n_reads=n_reads,
                                        read_number=ii + 1,
                                        gzip=gzip,
                                        compression_level=compression_level,
                                        output_prefix=output_prefix)

            tasks.append(t)

        # Run splitting
        Parallel(n_jobs=2)(tasks)

        # The suffixes are in the format 00, 01, 02 etc need to replace with int
        for fn in glob.glob(f"{output_prefix}_part*"):
            src = fn
            part_no = int(re.match(r"(?:.*)_part(\d+)_[1|2].*", fn).group(1))
            dest = re.sub(r"_part\d+_", f"_part{part_no}_", src)
            os.rename(src, dest)
Example #32
    def restore(self,
                hostname_backup_name,
                restore_resource,
                overwrite,
                recent_to_date,
                backup_media=None):
        """

        :param hostname_backup_name:
        :param restore_path:
        :param overwrite:
        :param recent_to_date:
        """
        if backup_media == 'fs':
            LOG.info("Creating restore path: {0}".format(restore_resource))
            # if restore path can't be created this function will raise
            # exception
            utils.create_dir_tree(restore_resource)
            if not overwrite and not utils.is_empty_dir(restore_resource):
                raise Exception(
                    "Restore dir is not empty. "
                    "Please use --overwrite or provide different path "
                    "or remove the content of {}".format(restore_resource))

            LOG.info("Restore path creation completed")

        backups = self.storage.get_latest_level_zero_increments(
            engine=self,
            hostname_backup_name=hostname_backup_name,
            recent_to_date=recent_to_date)

        max_level = max(backups.keys())

        # Use SimpleQueue because Queue does not work on Mac OS X.
        read_except_queue = SimpleQueue()
        LOG.info("Restoring backup {0}".format(hostname_backup_name))
        for level in range(0, max_level + 1):
            LOG.info("Restoring from level {0}".format(level))
            backup = backups[level]
            read_pipe, write_pipe = multiprocessing.Pipe()
            process_stream = multiprocessing.Process(target=self.read_blocks,
                                                     args=(backup, write_pipe,
                                                           read_pipe,
                                                           read_except_queue))

            process_stream.daemon = True
            process_stream.start()
            write_pipe.close()

            # Start the tar pipe consumer process

            # Use SimpleQueue because Queue does not work on Mac OS X.
            write_except_queue = SimpleQueue()

            engine_stream = multiprocessing.Process(target=self.restore_level,
                                                    args=(restore_resource,
                                                          read_pipe, backup,
                                                          write_except_queue))

            engine_stream.daemon = True
            engine_stream.start()

            read_pipe.close()
            write_pipe.close()
            process_stream.join()
            engine_stream.join()

            # SimpleQueue handling is different from queue handling.
            def handle_except_SimpleQueue(except_queue):
                if not except_queue.empty():
                    while not except_queue.empty():
                        e = except_queue.get()
                        LOG.exception('Engine error: {0}'.format(e))
                    return True
                else:
                    return False

            got_exception = None
            got_exception = (handle_except_SimpleQueue(read_except_queue)
                             or got_exception)
            got_exception = (handle_except_SimpleQueue(write_except_queue)
                             or got_exception)

            if engine_stream.exitcode or got_exception:
                raise engine_exceptions.EngineException(
                    "Engine error. Failed to restore.")

        LOG.info('Restore completed successfully for backup name '
                 '{0}'.format(hostname_backup_name))
Example #33


##########################################################################################################################

#counting stats that will let us keep track of how far we are
linesRead = 0
podsQueued = 0
podsDone = 0
podsSorted = 0
podsWritten = 0
linesWritten = 0


'''Create queues to hold the data. One will hold the pod info to be passed to the parser'''
inQueue = SimpleQueue()
#one will hold the results (in the order they come)
doneQueue = SimpleQueue()
#one will hold the sorted results to be written
writeQueue = SimpleQueue()


'''start worker Processes for parser. The command should be tailored for the analysis wrapper function
of course these will only start doing anything after we put data into the line queue
the function we call is actually a wrapper for another function.(s)
This one reads from the pod queue, passes each line some analysis function(s), gets the results and sends to the result queue'''
for x in range(nProcs):
    worker = Process(target=analysisWrapper,args=(inQueue,doneQueue,args.inputGenoFormat,args.outputGenoFormat,args.alleleOrder,
                                                  headers,include,exclude,samples,minCalls,minPopCallsDict,minAlleles,maxAlleles,
                                                  minPopAllelesDict,maxPopAllelesDict,minVarCount,maxHet,minFreq,maxFreq,
                                                  HWE_P,HWE_side,popDict,ploidyDict,fixed,args.nearlyFixedDiff,args.forcePloidy,
Example #34
def main_simple_queue():
    recv_q = SimpleQueue()
    Process(target=worker_queue, args=(recv_q, )).start()
    for num in range(NUM):
        message = recv_q.get()
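worker_queue and NUM are not shown in this snippet; a matching counterpart might look like the following (illustrative only):

from multiprocessing import Process, SimpleQueue

NUM = 5

def worker_queue(send_q):
    for num in range(NUM):
        send_q.put("message %d" % num)

def main_simple_queue():
    recv_q = SimpleQueue()
    Process(target=worker_queue, args=(recv_q,)).start()
    for num in range(NUM):
        message = recv_q.get()
        print(message)

if __name__ == "__main__":
    main_simple_queue()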
Example #35
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb),
                      self._processes,
                      self._pending_work_items,
                      self._work_ids,
                      self._call_queue,
                      self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                target=_process_worker,
                args=(self._call_queue,
                      self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                                        'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
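A brief usage sketch of the executor API shown above; the behaviour is the standard concurrent.futures one, so the stdlib class is imported here to keep the example self-contained:

# submit() returns a Future; map() yields results in input order even though
# the underlying calls may complete out of order.
from concurrent.futures import ProcessPoolExecutor


def square(x):
    return x * x


if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as executor:
        future = executor.submit(square, 3)
        print(future.result())                                     # 9
        print(list(executor.map(square, range(5), chunksize=2)))   # [0, 1, 4, 9, 16]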
Ejemplo n.º 36
0
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb), self._processes,
                      self._pending_work_items, self._work_ids,
                      self._call_queue, self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(target=_process_worker,
                                        args=(self._call_queue,
                                              self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool(
                    'A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
Ejemplo n.º 37
0
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Ejemplo n.º 38
0
import RPi.GPIO as GPIO
import time
from multiprocessing import Process, SimpleQueue
import serverClient as serv
import socket
from libares.moistconsts import *
from libares.constants import *

dataQueue = SimpleQueue()


def sendData(opcode, data):
    toSend = []
    toSend.append(opcode)
    toSend.extend(data)
    dataQueue.put(toSend)
    print("event-raw> Send data: 0x{}".format(bytearray(toSend).hex()))


def getSwitch(channel):
    return not GPIO.input(channel)


def switch_callback(channel):
    if not moistEnabled:
        print("event> not enabled :(")
        return

    time.sleep(0.01)
    switch_in = getSwitch(channel)
    switch = MAP[channel]
Ejemplo n.º 39
0
def fork_process(logger,
                 group=None,
                 target=None,
                 name=None,
                 args=(),
                 kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target)
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support

        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb,
                              None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group,
                target=instrumented_target,
                name=name,
                args=args,
                kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (
            p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2], "".join(traceback.format_tb(result.args[3])))
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
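A usage sketch for fork_process, assuming the rest of its module (the is_windows, raise_exception, etc. helpers) is available; the add function and logger name here are illustrative:

import logging


def add(x, y):
    return x + y


if __name__ == "__main__":
    exit_code, value = fork_process(logging.getLogger(__name__), target=add, args=(2, 3))
    # value == 5; if add() had raised, the same exception would be re-raised here,
    # with its original traceback restored via tblib.
    print(exit_code, value)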
Ejemplo n.º 40
0
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb), self._processes,
                      self._pending_work_items, self._work_ids,
                      self._call_queue, self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(target=_process_worker,
                                        args=(self._call_queue,
                                              self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool(
                    'A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        if self._call_queue is not None:
            self._call_queue.close()
            if wait:
                self._call_queue.join_thread()
            self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
Ejemplo n.º 41
0
    scafsToInclude = [line.rstrip() for line in scafsFile.readlines()]
    sys.stderr.write("{} scaffolds will be analysed.".format(
        len(scafsToInclude)))
    scafsFile.close()
else:
    scafsToInclude = None

##########################################################################################################

# counters that let us keep track of how far we are
windowsQueued = 0
resultsReceived = 0
resultsWritten = 0
resultsHandled = 0
'''Create queues to hold the data. One will hold the line info to be passed to the analysis.'''
windowQueue = SimpleQueue()
#one will hold the results (in the order they come)
resultQueue = SimpleQueue()
#one will hold the sorted results to be written
writeQueue = SimpleQueue()
'''Start worker Processes for analysis. The command should be tailored for the analysis wrapper function.
Of course these will only start doing anything after we put data into the line queue.
The function we call is actually a wrapper: it reads from the line queue, passes the data to the analysis function(s), gets the results and sends them to the result queue.'''
for x in range(args.threads):
    worker = Process(target=stats_wrapper,
                     args=(windowQueue, resultQueue, args.windType,
                           args.genoFormat, sampleData, minSites,
                           args.minPerInd, args.includeSameWithSame,
                           args.outFormat, args.roundTo, outputWindowData,
                           args.addWindowID))
    worker.daemon = True
Ejemplo n.º 42
0
    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in [
                        'ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                        'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
                        'ru_nivcsw'
                ]:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({
                    'ret': ret,
                    'start_time': start_time,
                    'elapsed_time': etime,
                    'rusage': rusage,
                    'iostat': iostat
                })
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(
                data_q,
                cmd,
            ))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES), ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time',
                                              data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(
            int(e_sec / 3600), int((e_sec % 3600) / 60), e_sec % 60))
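The worker above reports either a result dict or the raised exception back through the SimpleQueue, and the parent re-raises whatever exception it receives. A minimal self-contained sketch of that pattern (illustrative names, no resource accounting):

from multiprocessing import Process, SimpleQueue


def _worker(data_q):
    try:
        data_q.put({'answer': 6 * 7})   # normally: run the command, gather stats
    except Exception as err:
        data_q.put(err)                 # ship the exception to the parent instead


if __name__ == "__main__":
    data_q = SimpleQueue()
    proc = Process(target=_worker, args=(data_q,))
    proc.start()
    data = data_q.get()
    proc.join()
    if isinstance(data, Exception):
        raise data
    print(data['answer'])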
Ejemplo n.º 43
0
class WindowRecorder:
    """Programatically video record a window in Linux (requires xwininfo)"""
    def __init__(self,
                 window_names: Iterable[AnyStr] = None,
                 frame_rate=30.0,
                 name_suffix="",
                 save_dir=None):
        if window_names is None:
            logger.info(
                "Select a window to record by left clicking with your mouse")
            output = subprocess.check_output(["xwininfo"],
                                             universal_newlines=True)
            logger.info(f"Selected {output}")
        else:
            for name in window_names:
                try:
                    output = subprocess.check_output(
                        ["xwininfo", "-name", name], universal_newlines=True)
                    break
                except subprocess.CalledProcessError as e:
                    logger.debug(
                        f"Could not find window named {name}, trying next in list"
                    )
                    pass
            else:
                raise RuntimeError(
                    f"Could not find any windows with names from {window_names}"
                )

        properties = {}
        for line in output.split("\n"):
            if ":" in line:
                parts = line.split(":", 1)
                properties[parts[0].strip()] = parts[1].strip()

        left, top = int(properties["Absolute upper-left X"]), int(
            properties["Absolute upper-left Y"])
        width, height = int(properties["Width"]), int(properties["Height"])

        self.monitor = {
            "top": top,
            "left": left,
            "width": width,
            "height": height
        }
        self.frame_rate = frame_rate
        self.suffix = name_suffix
        self.save_dir = save_dir
        if self.save_dir is None:
            self.save_dir = cfg.CAPTURE_DIR

    def __enter__(self):
        if not os.path.exists(self.save_dir):
            raise FileNotFoundError(
                f"Trying to record to {self.save_dir}, but folder does not exist"
            )

        output = os.path.join(
            self.save_dir,
            f"{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}_{self.suffix}.mp4"
        )
        logger.debug(f"Recording video to {output}")
        self.q = SimpleQueue()
        self.record_process = Process(target=_record_loop,
                                      args=(self.q, output, self.monitor,
                                            self.frame_rate))
        self.record_process.start()
        return self

    def __exit__(self, *args):
        self.q.put('die')
        self.record_process.join()
        cv2.destroyAllWindows()
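A usage sketch for the context manager above, assuming the module defining WindowRecorder (and its _record_loop helper) is importable; the window name, save directory and workload are illustrative:

# Record whatever happens while the block runs; recording stops on __exit__.
with WindowRecorder(window_names=["glxgears"], frame_rate=30.0,
                    name_suffix="demo", save_dir="/tmp"):
    run_experiment()   # hypothetical workload performed while the window is recorded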
Ejemplo n.º 44
0
async def servo_loop(device: str,
                     sids: Sequence[int],
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy,
                     coder: KanervaCoder,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        # await send_msg(sr, sw, sid, [])
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    # read_data = [0x02,  # read
    #              0x24,  # starting from 0x24
    #              0x08]  # a string of 8 bytes

    read_all = [0x02,  # read
                0x00,  # starting from the beginning
                0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_all) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], list(action))

            # make feature vector
            active_pts = coder(obs=obs, byte_data=byte_data)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs, x=active_pts)

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            if locks:
                print('main gm a 1 a')
                gmlock.acquire()
                print('main gm a 1 b')
            main2gvf.put(gvf_data)
            if locks:
                print('main gm r 1 a')
                gmlock.release()
                print('main gm r 1 b')

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Ejemplo n.º 45
0
class CozmoMqttProgram():
    def __init__(self) -> None:
        self._cozmo = cozmo_client.Cozmo()
        self._queue = SimpleQueue()
        self._mqtt_client = None
        if MQTT_BROKER_URL is not None:
            self._mqtt_client = mqtt_client.MqttClient(
                MQTT_BROKER_URL, MQTT_BROKER_PORT, MQTT_USERNAME,
                MQTT_PASSWORD, MQTT_TOPICS, self._on_mqtt_message)
        self.sdk_conn: CozmoConnection = None
        self._faces: Dict[Face, datetime] = dict()
        self._visible_objects: Dict[ObservableObject, datetime] = dict()
        self._message_manager = MessageManager()
        self._cozmo_state = CozmoStates.Disconnected

    @property
    def cozmo_state(self) -> CozmoStates:
        return self._cozmo_state

    @cozmo_state.setter
    def cozmo_state(self, state: CozmoStates) -> None:
        self._cozmo_state = state
        self._publish_cozmo_state()

    async def run_with_robot_async(self, robot: cozmo.robot.Robot) -> None:
        self.sdk_conn = robot.world.conn
        await self._run_async(robot)

    async def _run_async(self, robot: cozmo.robot.Robot) -> None:
        await self._initialize_async(robot)
        try:
            while self.sdk_conn.is_connected:
                self._cozmo.update_needs_level()
                if self._cozmo.needs_charging(
                ) and not self._cozmo.is_sleeping:
                    await self._charge_cycle()
                    await self._cozmo.wake_up_async()
                    self._cozmo_freetime()

                if not self._queue.empty():
                    await self._handle_queue_async()

                if self._cozmo.world.visible_face_count() > 0:
                    face = self._get_visible_face()
                    if face:
                        if face in self._faces:
                            last_seen = self._faces[face]
                            if (datetime.now() -
                                    last_seen).total_seconds() > 60:
                                await self._cozmo_do_async(
                                    self._on_saw_face(face))
                        else:
                            await self._cozmo_do_async(self._on_saw_face(face))

                if self._cozmo.robot.is_picked_up:
                    await self._cozmo_do_async(self._on_picked_up_async())

                if self._cozmo.robot.is_cliff_detected:
                    await self._cozmo_do_async(self._on_cliff_detected_async())

                if self._cozmo.world.visible_object_count(
                        object_type=ObservableObject) > 0:
                    visible_object = self._get_visible_object()
                    if visible_object:
                        if visible_object in self._visible_objects:
                            last_seen = self._visible_objects[visible_object]
                            if (datetime.now() -
                                    last_seen).total_seconds() > 60 * 5:
                                await self._cozmo_do_async(
                                    self._on_new_object_appeared_async(
                                        visible_object))
                        else:
                            await self._cozmo_do_async(
                                self._on_new_object_appeared_async(
                                    visible_object))

                await asyncio.sleep(0.1)
        except:
            print("Unexpected error:", sys.exc_info()[0])

        await self.terminate_async()

    async def _initialize_async(self, robot: cozmo.robot.Robot) -> None:
        self._observe_connection_lost(self.sdk_conn, self._on_connection_lost)
        self._cozmo.set_robot(robot)
        await asyncio.gather(self._cozmo.connect_to_cubes_async(),
                             self._cozmo.get_off_charger_async())
        if self._mqtt_client is not None:
            await self._mqtt_client.connect_async()
        self.cozmo_state = CozmoStates.Connected
        self._cozmo_freetime()

    async def terminate_async(self) -> None:
        print("Terminating")
        if self._mqtt_client is not None:
            await self._mqtt_client.disconnect_async()
        if self.sdk_conn.is_connected:
            print("Sending cozmo back to charger")
            await self._cozmo.stop_all_actions_async()
            self._cozmo.back_to_normal()
            await self._cozmo.get_on_charger_async()

    def _observe_connection_lost(self, connection: CozmoConnection, cb):
        meth = connection.connection_lost

        @functools.wraps(meth)
        def connection_lost(self, exc):
            meth(exc)
            cb()

        connection.connection_lost = types.MethodType(connection_lost,
                                                      connection)

    def _on_connection_lost(self) -> None:
        print("Captured connection lost")
        self.cozmo_state = CozmoStates.ConnectionLost

    def _publish_cozmo_state(self) -> None:
        if self._mqtt_client is not None:
            payload = dict()
            payload["status"] = self.cozmo_state.value
            attributes = dict()
            if self._cozmo.robot:
                attributes["battery_voltage"] = self._cozmo.battery_voltage
            payload["attributes"] = attributes
            self._mqtt_client.publish(COZMO_MQTT_PUBLISHING_TOPIC, payload)

    async def _on_saw_face(self, face: Face) -> None:
        self._faces[face] = datetime.now()
        self.cozmo_state = CozmoStates.SawFace
        print("An face appeared: {}".format(face))
        if face.name:
            await self._cozmo.turn_toward_face_async(face)
            message = self._message_manager.get_hello_message(face)
            await self._cozmo.random_positive_anim_async()
            await self._cozmo.say_async(message)
            if face.known_expression:
                message = self._message_manager.get_fece_expression_message(
                    face.known_expression, face)
                await self._cozmo.say_async(message)
        else:
            message = self._message_manager.get_non_recognized_message(face)
            await self._cozmo.say_async(message)

    async def _on_picked_up_async(self) -> None:
        print("Cozmo was picked up")
        self.cozmo_state = CozmoStates.PickedUp
        face = self._get_visible_face()
        message = self._message_manager.get_picked_up_message(face)
        await self._cozmo.random_positive_anim_async()
        if face:
            await self._cozmo.display_camera_image_async()
        await self._cozmo.say_async(message)
        while self._cozmo.robot.is_picked_up:
            await asyncio.sleep(0.1)
        print("Cozmo was put down")

    async def _on_cliff_detected_async(self) -> None:
        print("Cozmo detected a cliff")
        self.cozmo_state = CozmoStates.OnCliff
        self._cozmo.stop()
        self._cozmo.clear_current_animations()
        await self._cozmo.drive_wheels_async(-40, 1)
        face = self._get_visible_face()
        message = self._message_manager.get_cliff_detected_message(face)
        await self._cozmo.random_negative_anim_async()
        await self._cozmo.say_async(message)
        while self._cozmo.robot.is_cliff_detected:
            await asyncio.sleep(0.1)
        print("Cozmo away from cliff")

    async def _on_new_object_appeared_async(
            self, visible_object: ObservableObject) -> None:
        self._visible_objects[visible_object] = datetime.now()
        print("An obbject appeared: {}".format(visible_object))
        face = self._get_visible_face()
        message = self._message_manager.get_object_appeared_message(
            visible_object, face)
        await self._cozmo.say_async(message)

    def _get_visible_face(self) -> Face:
        if self._cozmo.world.visible_face_count() == 0:
            print("Found no visibile faces")
            return None

        visible_face = next((face for face in self._cozmo.world.visible_faces),
                            None)
        return visible_face

    def _get_visible_object(self) -> ObservableObject:
        if self._cozmo.world.visible_object_count(
                object_type=ObservableObject) == 0:
            print("Found no visibile objects")
            return None

        visible_obj = next((obj for obj in self._cozmo.world.visible_objects),
                           None)
        return visible_obj

    def _cozmo_freetime(self) -> None:
        self._cozmo.start_free_time()
        self.cozmo_state = CozmoStates.Freetime

    async def _cozmo_do_async(self, async_f: Awaitable) -> None:
        if self._cozmo.freetime_enabled:
            self._cozmo.stop_free_time()
        try:
            await async_f
            self._cozmo_freetime()
        except cozmo.RobotBusy:
            print("Task Exception...cozmo is Busy")

    async def _charge_cycle(self) -> None:
        self.cozmo_state = CozmoStates.GoingToCharge
        print("Cozmo needs charging. Battery level {}".format(
            self._cozmo.battery_voltage))
        await self._cozmo.start_charging_routine_async()
        self.cozmo_state = CozmoStates.Charging
        await self._cozmo.charge_to_full_async()
        print("Cozmo charged")

    # MQTT Queue Related-------------------------------------------------------------------------------------------------------------------
    def _on_mqtt_message(self, client, topic, payload, qos,
                         properties) -> None:
        try:
            json_data = json.loads(payload.decode('utf-8'))
            print("Topic: {}".format(topic))
            print("Data: {}".format(json_data))
            topic_data_tuple = (topic, json_data)
            self._queue.put(topic_data_tuple)
        except:
            print("Unexpected error:", sys.exc_info()[0])

    async def _handle_queue_async(self) -> None:
        if not self._queue.empty():
            print("Cozmo processing queue")
            await self._cozmo_do_async(
                self._process_message_async(self._queue.get()))
            await self._handle_queue_async()

    async def _process_message_async(self, topic_data_tuple: tuple) -> None:
        topic = topic_data_tuple[0]
        json_data = topic_data_tuple[1]
        if topic == MQTT_WEATHER_TOPIC:
            await self._process_weather_notification_async(json_data)
        elif topic == MQTT_CONTROL_TOPIC:
            await self._process_control_msg_async(json_data)

    async def _process_control_msg_async(self, json_data: dict) -> None:
        if "msg" in json_data:
            msg = json_data["msg"]
            if msg == 'sleep':
                asyncio.create_task(self._cozmo.sleep_async())
            if msg == 'freetime':
                await self._cozmo.wake_up_async()
                self._cozmo_freetime()

    async def _process_weather_notification_async(self,
                                                  json_data: dict) -> None:
        if "msg" in json_data:
            msg = json_data["msg"]
            image_url = None
            color = None
            title = "I have a weather update notification for you."
            if "imagePath" in json_data:
                image_url = json_data["imagePath"]
            if 'clear' in msg:
                print("Clear outside!")
                color = YELLOW
            elif 'cloudy' in msg:
                print("Cloudy outside!")
                color = SLATE_GRAY
            await self._cozmo_announce_weather_update_async(
                msg, title, color, image_url)

    async def _cozmo_announce_weather_update_async(
            self,
            msg: str,
            title: str,
            rgb: Union[Tuple, None] = None,
            image_url: str = None) -> None:
        self.cozmo_state = CozmoStates.Anouncing
        if rgb:
            light = Light(Color(rgb=rgb)).flash()
            self._cozmo.cubes_change_lights(light)
            self._cozmo.backpack_change_light(light)
        await self._cozmo.random_positive_anim_async()
        await self._cozmo.say_async(title)
        await self._cozmo.say_async(msg)
        if image_url:
            await self._cozmo.show_image_from_url_async(image_url)
        if rgb:
            self._cozmo.turn_cubes_lights_off()
            self._cozmo.turn_backpack_light_off()
Ejemplo n.º 46
0
                #     data_read,
                #     data_read/1024,
                #     data_read/1024/1024,
                #     data_read/1024/1024/1024))
        else:
            print("\n\n{}: no pipe detected..".format(my_name), flush=True)
        # print("{}: putting..".format(my_name))
        q.put(ndimg)
        print("{}: done".format(my_name))
    return


if __name__ == "__main__":
    from multiprocessing import Process, SimpleQueue
    rep = 10
    q = SimpleQueue()
    processes = [
        Process(name=a[0], target=read_pipe, args=a)
        for a in [("DOUT", depth_pipe, q, rep), ("COUT", color_pipe, q, rep)]
    ]
    # start and wait for processes
    for p in processes:
        print("posting process {}".format(p.name))
        p.start()

    for _ in range(len(processes) * rep):
        tmp_img = q.get()
    # for _ in range(2):
    #     tmp_img = q.get()
    #     if type(tmp_img) == list:
Ejemplo n.º 47
0
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     gvf2main: mp.SimpleQueue,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # read_all = [0x02,  # read
    #             0x00,  # starting from the beginning
    #             0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # send action and features to GVFs
            gvf_data = (obs, active_pts)
            main2gvf.put(gvf_data)

            # get action control GVFs
            action = gvf2main.get()

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Ejemplo n.º 48
0
def scan_regionset(regionset, options):
    """ This function scans all te region files in a regionset object
    and fills the ScannedRegionFile obj with the results
    """

    total_regions = len(regionset.regions)
    total_chunks = 0
    corrupted_total = 0
    wrong_total = 0
    entities_total = 0
    too_small_total = 0
    unreadable = 0

    # init progress bar
    if not options.verbose:
        pbar = progressbar.ProgressBar(
            widgets=['Scanning: ', FractionWidget(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(left='[',right=']'), ' ', progressbar.ETA()],
            maxval=total_regions)

    # queue used by processes to pass finished stuff
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes=options.processes,
            initializer=_mp_pool_init,initargs=(regionset,options,q))

    if not options.verbose:
        pbar.start()

    # start the pool
    # Note to self: every child process has its own memory space,
    # which means every obj received by them will be a copy of the
    # main obj
    result = pool.map_async(multithread_scan_regionfile, regionset.list_regions(None), max(1,total_regions//options.processes))

    # printing status
    region_counter = 0

    while not result.ready() or not q.empty():
        time.sleep(0.01)
        if not q.empty():
            r = q.get()
            if r is None: # something went wrong scanning this region file
                          # probably a bug... don't know if it's a good
                          # idea to skip it
                continue
            if not isinstance(r,world.ScannedRegionFile):
                raise ChildProcessException(r)
            else:
                corrupted, wrong, entities_prob, shared_offset, num_chunks = r.get_counters()
                filename = r.filename
                # the obj returned is a copy, overwrite it in regionset
                regionset[r.get_coords()] = r
                corrupted_total += corrupted
                wrong_total += wrong
                total_chunks += num_chunks
                entities_total += entities_prob
                if r.status == world.REGION_TOO_SMALL:
                    too_small_total += 1
                elif r.status == world.REGION_UNREADABLE:
                    unreadable += 1
                region_counter += 1
                if options.verbose:
                  if r.status == world.REGION_OK:
                    stats = "(c: {0}, w: {1}, tme: {2}, so: {3}, t: {4})".format( corrupted, wrong, entities_prob, shared_offset, num_chunks)
                  elif r.status == world.REGION_TOO_SMALL:
                    stats = "(Error: not a region file)"
                  elif r.status == world.REGION_UNREADABLE:
                    stats = "(Error: unreadable region file)"
                  print("Scanned {0: <12} {1:.<43} {2}/{3}".format(filename, stats, region_counter, total_regions))
                else:
                    pbar.update(region_counter)

    if not options.verbose: pbar.finish()

    regionset.scanned = True
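The scan above hands the SimpleQueue to every pool worker through the initializer/initargs mechanism rather than through the task arguments. A minimal self-contained sketch of that pattern (illustrative names, items squared instead of region files scanned):

import multiprocessing
from multiprocessing import SimpleQueue

_queue = None


def _mp_pool_init(q):
    # runs once in each worker process; stash the shared queue in a global
    global _queue
    _queue = q


def scan_one(item):
    _queue.put(item * item)


if __name__ == "__main__":
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes=2, initializer=_mp_pool_init, initargs=(q,))
    result = pool.map_async(scan_one, range(5))
    result.wait()
    pool.close()
    pool.join()
    while not q.empty():
        print(q.get())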
Ejemplo n.º 49
0
        unfiltered = [filepath]

    contracts += [u for u in unfiltered if pattern.match(u) is not None]

contracts = contracts[args.skip:]

log("Setting up workers.")
# Set up multiprocessing result list and queue.
manager = Manager()

# This list contains analysis results as
# (filename, category, meta, analytics) quadruples.
res_list = manager.list()

# Holds results transiently before flushing to res_list
res_queue = SimpleQueue()

# Start the periodic flush process, only run while run_signal is set.
run_signal = Event()
run_signal.set()
flush_proc = Process(target=flush_queue,
                     args=(run_signal, res_queue, res_list))
flush_proc.start()

workers = []
avail_jobs = list(range(args.jobs))
contract_iter = enumerate(contracts)
contracts_exhausted = False

log("Analysing...\n")
try:
Ejemplo n.º 50
0
from multiprocessing import Pool, Queue, SimpleQueue
import re, nltk,  pandas as pd, queue, os
from nltk.tokenize.toktok import ToktokTokenizer
from textblob import Word
from nltk.corpus import wordnet

stopword_list = nltk.corpus.stopwords.words('english')
tokenizer = ToktokTokenizer()
resultQueue = queue.Queue()
resultSimpleQueue = SimpleQueue()
q = Queue()

def processText(text):
    try:
        row = str(text).lower()
        row = row.replace("n't", " not")
        row = row.replace("'s", "")
        row = re.sub(r'[^a-zA-Z0-9]', ' ', row)
        row = re.sub(r'\s+', ' ', row)
        tokens = tokenizer.tokenize(row)
        tokens = " ".join(Word(token.strip()).lemmatize() for token in tokens if token not in stopword_list)
        return tokens
    except Exception as e:
        print(e)

def lemmatizeAndClean(text):
    try:
        print('parent id:', os.getppid(), 'process id:', os.getpid())
        tokens = processText(text)
        print('result', tokens)
        resultSimpleQueue.put(tokens)
Ejemplo n.º 51
0
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [g.data(xi, obs, action, xpi, obsp)
                 for gs, xi, xpi in zip(gvfs, x, xp)
                 for g in gs]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Ejemplo n.º 52
0
class DynamodbToS3Test(unittest.TestCase):
    def setUp(self):
        self.output_queue = SimpleQueue()

        def mock_upload_file(Filename, Bucket, Key):  # pylint: disable=unused-argument,invalid-name
            with open(Filename) as f:
                lines = f.readlines()
                for line in lines:
                    self.output_queue.put(json.loads(line))

        self.mock_upload_file_func = mock_upload_file

    def output_queue_to_list(self):
        items = []
        while not self.output_queue.empty():
            items.append(self.output_queue.get())
        return items

    @patch('airflow.contrib.operators.dynamodb_to_s3.S3Hook')
    @patch('airflow.contrib.operators.dynamodb_to_s3.AwsDynamoDBHook')
    def test_dynamodb_to_s3_success(self, mock_aws_dynamodb_hook,
                                    mock_s3_hook):
        responses = [
            {
                'Items': [{
                    'a': 1
                }, {
                    'b': 2
                }],
                'LastEvaluatedKey': '123',
            },
            {
                'Items': [{
                    'c': 3
                }],
            },
        ]
        table = MagicMock()
        table.return_value.scan.side_effect = responses
        mock_aws_dynamodb_hook.return_value.get_conn.return_value.Table = table

        s3_client = MagicMock()
        s3_client.return_value.upload_file = self.mock_upload_file_func
        mock_s3_hook.return_value.get_conn = s3_client

        dynamodb_to_s3_operator = DynamoDBToS3Operator(
            task_id='dynamodb_to_s3',
            dynamodb_table_name='airflow_rocks',
            s3_bucket_name='airflow-bucket',
            file_size=4000,
        )

        dynamodb_to_s3_operator.execute(context={})

        self.assertEqual([{
            'a': 1
        }, {
            'b': 2
        }, {
            'c': 3
        }], self.output_queue_to_list())
Ejemplo n.º 53
0
class GMailWorker(object):
    """
        Background GMail SMTP sender

        This class runs a GMail connection object in the background (using 
        the multiprocessing module) which accepts messages through a 
        simple queue. No feedback is provided.

        The worker object should be closed on exit (will otherwise prevent
        the interpreter from exiting).

        The object provides a similar api to the Gmail object.

        Basic usage:

        >>> gmail_worker = GMailWorker('A.User <*****@*****.**>','password')
        >>> msg = Message('Test Message',to='xyz <*****@*****.**',text='Hello')
        >>> gmail_worker.send(msg)
        >>> gmail_worker.close()

    """
    def __init__(self, username, password, debug=False):
        """
            GMail SMTP connection worker

            username    : GMail username 
                          This can either be a simple address ('*****@*****.**') 
                          or can include a name ('"A User" <*****@*****.**>').
                          
                          The username specified is used as the sender address

            password    : GMail password
            debug       : Debug flag (passed to smtplib)

            Runs '_gmail_worker' helper in background using multiprocessing
            module.

            '_gmail_worker' loops listening for new message objects on the
            shared queue and sends these using the GMail SMTP connection.
        """
        self.queue = SimpleQueue()
        self.worker = Process(target=_gmail_worker,
                              args=(username, password, self.queue, debug))
        self.worker.start()

    def send(self, message, rcpt=None):
        """
            message         : email.Message instance
            rcpt            : List of recipients (normally parsed from
                              To/Cc/Bcc fields)

            Send message object via background worker
        """
        self.queue.put((message, rcpt))

    def close(self):
        """
            Close down background worker
        """
        self.queue.put(('QUIT', None))

    def __del__(self):
        self.close()
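The _gmail_worker helper that this class launches is not shown in the excerpt. Here is a minimal sketch of the loop the docstring describes, assuming a GMail connection class with connect(), send() and close() methods (an assumption; the real helper may differ).

def _gmail_worker(username, password, queue, debug=False):
    # Hypothetical worker loop: keep one SMTP session open and send queued
    # messages until the 'QUIT' sentinel put by GMailWorker.close() arrives.
    gmail = GMail(username, password, debug=debug)  # assumed connection class
    gmail.connect()
    try:
        while True:
            message, rcpt = queue.get()
            if message == 'QUIT':
                break
            try:
                gmail.send(message, rcpt)
            except Exception:
                # The docstring promises no feedback, so errors are swallowed.
                pass
    finally:
        gmail.close()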
Ejemplo n.º 54
0
runtime_files = filter(lambda filename: pattern.match(filename) is not None,
                       unfiltered)

stop_index = None if args.num_contracts is None else args.skip + args.num_contracts
to_process = itertools.islice(runtime_files, args.skip, stop_index)

log("Setting up workers.")
# Set up multiprocessing result list and queue.
manager = Manager()

# This list contains analysis results as
# (filename, category, meta, analytics) quadruples.
res_list = manager.list()

# Holds results transiently before flushing to res_list
res_queue = SimpleQueue()

# Start the periodic flush process, only run while run_signal is set.
run_signal = Event()
run_signal.set()
flush_proc = Process(target=flush_queue,
                     args=(FLUSH_PERIOD, run_signal, res_queue, res_list))
flush_proc.start()

workers = []
avail_jobs = list(range(args.jobs))
contract_iter = enumerate(to_process)
contracts_exhausted = False

log("Analysing...\n")
try:
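The flush_queue target started above is not included in the excerpt. Here is a minimal sketch of a periodic flusher matching the arguments it receives (flush period, run signal, transient queue, managed result list), assuming results only need to be drained into the list.

import time

def flush_queue(period, run_sig, result_queue, result_list):
    # Hypothetical flusher: every `period` seconds, drain whatever results
    # the workers have queued into the manager-backed list, until the main
    # process clears the run signal.
    while run_sig.is_set():
        time.sleep(period)
        while not result_queue.empty():
            result_list.append(result_queue.get())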
Ejemplo n.º 55
0
    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(
            int(e_sec / 3600), int((e_sec % 3600) / 60), e_sec % 60))
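A hypothetical call site for the method above (the command, name and legend are illustrative; it assumes runCmd2 accepts a command list):

# Time a build command and record its rusage/iostat under 'build_image'.
self.measure_cmd_resources(['bitbake', 'core-image-minimal'],
                           'build_image', 'Build core-image-minimal')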
Ejemplo n.º 56
0
def job(n: int, results: SimpleQueue) -> None:  # <6>
    results.put((n, check(n)))  # <7>
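job() only does anything useful with a driver that fans out one process per candidate and drains the shared queue. A minimal sketch, assuming the check() predicate referenced above is defined elsewhere in the module:

from multiprocessing import Process, SimpleQueue

def run_jobs(numbers):
    # Hypothetical driver: one worker process per number; results arrive on
    # the shared queue in whatever order the workers finish.
    results = SimpleQueue()
    workers = [Process(target=job, args=(n, results)) for n in numbers]
    for w in workers:
        w.start()
    for _ in numbers:
        n, result = results.get()  # blocks until some worker reports
        print(n, result)
    for w in workers:
        w.join()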
Ejemplo n.º 57
0
    scafsToInclude = [line.rstrip() for line in scafsFile.readlines()]
    sys.stderr.write("{} scaffolds will be analysed.".format(
        len(scafsToInclude)))
    scafsFile.close()
else:
    scafsToInclude = None

##########################################################################################################

#counters that will let us keep track of how far we are
windowsQueued = 0
resultsReceived = 0
resultsWritten = 0
resultsHandled = 0
'''Create queues to hold the data. One will hold the line info to be passed to the analysis'''
windowQueue = SimpleQueue()
#one will hold the results (in the order they come)
resultQueue = SimpleQueue()
#one will hold the sorted results to be written
writeQueue = SimpleQueue()
'''Start worker Processes for analysis. The command should be tailored to the analysis wrapper function.
Of course, these will only start doing anything after we put data into the line queue.
The function we call is actually a wrapper around the analysis function(s): it reads from the line queue,
passes the data to the analysis function(s), gets the results and sends them to the result queue.'''
workerThreads = []
sys.stderr.write("\nStarting {} worker threads\n".format(args.threads))
for x in range(args.threads):
    workerThread = Process(
        target=stats_wrapper,
        args=(windowQueue, resultQueue, windType, genoFormat, sampleData,
              minSites, args.analysis, stats, args.addWindowID, args.roundTo))
    workerThread.daemon = True
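The stats_wrapper target itself is not part of this excerpt. Based on the comment above, its core loop presumably looks something like the sketch below, where analyse_window stands in for the real per-window statistics.

def stats_wrapper_sketch(window_queue, result_queue, *analysis_args):
    # Hypothetical shape of the worker target: pull windows off the queue,
    # run the analysis, and pass numbered results downstream for sorting.
    while True:
        window_number, window = window_queue.get()  # blocks until data arrives
        result = analyse_window(window, *analysis_args)  # placeholder analysis
        result_queue.put((window_number, result))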
Ejemplo n.º 58
0
from play import index2coord, make_play
from self_play import top_one_action, new_subtree
from conf import conf
from multiprocessing import Queue, Pool, Lock, SimpleQueue, Manager
from predicting_queue_worker import put_predict_request

MCTS_SIMULATIONS_PROCESSES = conf['ENERGY']
GPUS = conf['GPUs']
board_queue = Queue()
subtree_queue = Queue()
simulation_result_queue = {}
for i in range(conf['N_GAME_PROCESS']):
    simulation_result_queue[i] = SimpleQueue()

process_pool = None
lock = None


def init_simulation_workers():
    global process_pool
    global lock
    lock = Lock()
    process_pool = Pool(processes=MCTS_SIMULATIONS_PROCESSES, initializer=init_pool_param, initargs=(lock,))


def init_simulation_workers_by_gpuid(GPU_ID):
    global process_pool
    process_pool = Pool(processes=MCTS_SIMULATIONS_PROCESSES)


def init_pool_param(l):
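The body of init_pool_param is cut off above. The usual idiom for a Pool initializer that receives a Lock (a sketch of the pattern, not necessarily the original body) is to stash it in a module-level global so every worker can reach it:

def init_pool_param_sketch(l):
    # Hypothetical initializer body: store the shared lock in the module
    # global so pool workers spawned with this initializer can synchronise.
    global lock
    lock = l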
Ejemplo n.º 59
0
    def __init__(self, name, bucket_func):
        self.buckets = Bucket(name)
        self.q = SimpleQueue()
        self.get_bucket = bucket_func
Ejemplo n.º 60
0
    def __init__(self):
        self.node_queue = SimpleQueue()