Example #1
def export_table(host, port, auth_key, db, table, directory, fields, delimiter, format,
                 error_queue, progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter, task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                         table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Example #2
File: workflow.py Project: raysect/source
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish ipc queues using a manager process
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
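
The run() method above relies on a pattern that recurs throughout these examples: a producer fills a SimpleQueue with tasks, a pool of worker processes drains it, and one None sentinel per worker signals shutdown. Below is a minimal, self-contained sketch of the same idea; the squaring "render" step and the worker count are placeholders, not part of the raysect code.

from multiprocessing import Process, SimpleQueue

def square_worker(task_queue, result_queue):
    # drain tasks until the None sentinel arrives
    while True:
        task = task_queue.get()
        if task is None:
            break
        result_queue.put(task * task)  # stand-in for the real render step

if __name__ == '__main__':
    task_queue, result_queue = SimpleQueue(), SimpleQueue()
    workers = [Process(target=square_worker, args=(task_queue, result_queue))
               for _ in range(4)]
    for w in workers:
        w.start()

    tasks = list(range(20))
    for t in tasks:
        task_queue.put(t)

    # consume exactly as many results as tasks were submitted
    results = [result_queue.get() for _ in tasks]

    # one sentinel per worker shuts the pool down
    for _ in workers:
        task_queue.put(None)
    for w in workers:
        w.join()
    print(sorted(results))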
Example #3
async def data_from_file(main2gvf: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')
    for i, item in enumerate(data):
        # if i > 500:
        #     break
        item[-1] = coder(x1=item[-1], x2=item[-2])
        main2gvf.put(item)
Example #4
async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)

    time.sleep(0.1)
    while not gvf2plot.empty():
        time.sleep(0.1)
Example #5
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #6
def start(parsed_args):
    from multiprocessing import Process, SimpleQueue

    processes = []
    msg_queue = SimpleQueue()
    word_count_queue = SimpleQueue()
    unique_words_queue = SimpleQueue()
    median_queue = SimpleQueue()

    # Prep workers to read from msg queue and write to other queues
    for i in range(workers):
        p = Process(target=worker,
                      args=(msg_queue, unique_words_queue, word_count_queue))
        processes.append(p)
        p.start()

    # Prep a process to accumulate word_count_queue for ft1.txt
    p = Process(target=accumulator,
                  args=(word_count_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Prep a process to re-sequence unique words counted
    p = Process(target=buffered_resequener,
                  args=(unique_words_queue, median_queue))
    processes.append(p)
    p.start()

    # Prep a process to keep a running median of unique words for ft2.txt
    p = Process(target=running_median,
                  args=(median_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Start reading msgs for the msg_queue
    ingest(parsed_args.file, msg_queue)

    # Sending an indication to stop, one for each worker
    for i in range(workers):
        msg_queue.put(None)

    # This step gathers the child processes, but may be unnecessary
    for p in processes:
        p.join()
Example #7
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Example #8
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #9
File: blocking.py Project: MSusik/beard
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    # pass the clusterer already fitted for this block
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Example #10
            except StopIteration:
                contracts_exhausted = True

        # Loop until some process terminates (to retask it) or,
        # if there are no unanalysed contracts left, until currently-running contracts are done
        while len(avail_jobs) == 0 or (contracts_exhausted
                                       and 0 < len(workers)):
            to_remove = []
            for i in range(len(workers)):
                start_time = workers[i]["time"]
                proc = workers[i]["proc"]
                name = workers[i]["name"]
                job_index = workers[i]["job_index"]

                if time.time() - start_time > args.timeout_secs:
                    res_queue.put((name, [], ["TIMEOUT"], {}))
                    proc.terminate()
                    log("{} timed out.".format(name))
                    to_remove.append(i)
                    avail_jobs.append(job_index)
                elif not proc.is_alive():
                    to_remove.append(i)
                    proc.join()
                    avail_jobs.append(job_index)

            # Reverse index order so as to pop elements correctly
            for i in reversed(to_remove):
                workers.pop(i)

            time.sleep(0.01)
Example #11
                                                   overlap,
                                                   maxDist,
                                                   minSites,
                                                   headerLine=args.header,
                                                   names=sampleData.indNames,
                                                   include=scafsToInclude,
                                                   exclude=scafsToExclude)
else:
    windowGenerator = genomics.predefinedCoordWindows(
        genoFile,
        windCoords,
        headerLine=args.header,
        names=sampleData.indNames)

for window in windowGenerator:
    windowQueue.put((windowsQueued, window))
    windowsQueued += 1

############################################################################################################################################

#Now we send completion signals to all worker threads
for x in range(args.threads):
    windowQueue.put((
        -1,
        None,
    ))  # -1 tells the threads to break

sys.stderr.write("\nWaiting for all {} threads to finish\n".format(args.threads))
for x in range(len(workerThreads)):
    workerThreads[x].join()
Example #12
class GMailWorker(object):
    """
        Background GMail SMTP sender

        This class runs a GMail connection object in the background (using 
        the multiprocessing module) which accepts messages through a 
        simple queue. No feedback is provided.

        The worker object should be closed on exit (will otherwise prevent
        the interpreter from exiting).

        The object provides a similar api to the Gmail object.

        Basic usage:

        >>> gmail_worker = GMailWorker('A.User <*****@*****.**>','password')
        >>> msg = Message('Test Message',to='xyz <*****@*****.**',text='Hello')
        >>> gmail_worker.send(msg)
        >>> gmail_worker.close()

    """
    def __init__(self, username, password, debug=False):
        """
            GMail SMTP connection worker

            username    : GMail username 
                          This can either be a simple address ('*****@*****.**') 
                          or can include a name ('"A User" <*****@*****.**>').
                          
                          The username specified is used as the sender address

            password    : GMail password
            debug       : Debug flag (passed to smtplib)

            Runs '_gmail_worker' helper in background using multiprocessing
            module.

            '_gmail_worker' loops listening for new message objects on the
            shared queue and sends these using the GMail SMTP connection.
        """
        self.queue = SimpleQueue()
        self.worker = Process(target=_gmail_worker,
                              args=(username, password, self.queue, debug))
        self.worker.start()

    def send(self, message, rcpt=None):
        """
            message         : email.Message instance
            rcpt            : List of recipients (normally parsed from
                              To/Cc/Bcc fields)

            Send message object via background worker
        """
        self.queue.put((message, rcpt))

    def close(self):
        """
            Close down background worker
        """
        self.queue.put(('QUIT', None))

    def __del__(self):
        self.close()
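
The _gmail_worker helper that this class launches is defined elsewhere in the project and is not shown here. The following is a rough, hypothetical sketch of what such a loop could look like, based only on the docstring above; the SMTP host, port, and login handling are assumptions, not the project's actual code. It blocks on the shared queue and stops on the ('QUIT', None) sentinel put by close().

import smtplib

def _gmail_worker(username, password, queue, debug=False):
    # hypothetical sketch: open one SMTP connection, then drain the queue
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.set_debuglevel(debug)
    smtp.starttls()
    smtp.login(username, password)
    try:
        while True:
            message, rcpt = queue.get()
            if message == 'QUIT':  # sentinel sent by close()
                break
            recipients = rcpt or [message['To']]
            smtp.sendmail(username, recipients, message.as_string())
    finally:
        smtp.quit()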
Example #13
            while True:
                try:
                    if not self.send_queue.empty(): # If there is something to send
                        message = self.send_queue.get()
                        message = str(len(message.encode())).zfill(4) + message # Prefix four digit byte amount
                        connection.send(message.encode())
                        print("Sent message")
                    size_prefix = connection.recv(4).decode() # Receive four bytes
                    if size_prefix == '': # If disconnected
                        print("Connection finished")
                        break
                    if size_prefix: # If a message was received
                        self.recv_queue.put(connection.recv(int(size_prefix)).decode())
                        print("Received message")
                except Exception as error:
                    if error.errno != EWOULDBLOCK: # Remove error messages due to non-blocking
                        print(error)


# Setup process stuff

recv_queue = SimpleQueue()
send_queue = SimpleQueue()

server = Server(send_queue, recv_queue)
server.start()

while True:
    send_queue.put("mayonaise")
    print(recv_queue.get())
Example #14
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
예제 #15
0
class GMailWorker(object):

    """
        Background GMail SMTP sender

        This class runs a GMail connection object in the background (using 
        the multiprocessing module) which accepts messages through a 
        simple queue. No feedback is provided.

        The worker object should be closed on exit (will otherwise prevent
        the interpreter from exiting).

        The object provides a similar api to the Gmail object.

        Basic usage:

        >>> gmail_worker = GMailWorker('A.User <*****@*****.**>','password')
        >>> msg = Message('Test Message',to='xyz <*****@*****.**',text='Hello')
        >>> gmail_worker.send(msg)
        >>> gmail_worker.close()

    """
    def __init__(self,username,password,debug=False):
        """
            GMail SMTP connection worker

            username    : GMail username 
                          This can either be a simple address ('*****@*****.**') 
                          or can include a name ('"A User" <*****@*****.**>').
                          
                          The username specified is used as the sender address

            password    : GMail password
            debug       : Debug flag (passed to smtplib)

            Runs '_gmail_worker' helper in background using multiprocessing
            module.

            '_gmail_worker' loops listening for new message objects on the
            shared queue and sends these using the GMail SMTP connection.
        """
        self.queue = SimpleQueue()
        self.worker = Process(target=_gmail_worker,args=(username,password,self.queue,debug))
        self.worker.start()

    def send(self,message,rcpt=None):
        """
            message         : email.Message instance
            rcpt            : List of recipients (normally parsed from
                              To/Cc/Bcc fields)

            Send message object via background worker
        """
        self.queue.put((message,rcpt))

    def close(self):
        """
            Close down background worker
        """
        self.queue.put(('QUIT',None))

    def __del__(self):
        self.close()
Example #16
def export_table(db, table, directory, fields, delimiter, format, error_queue,
                 progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # -- get table info

        table_info = utils_common.retryQuery('table info: %s.%s' % (db, table),
                                             r.db(db).table(table).info())

        # Rather than just the index names, store all index information
        table_info['indexes'] = utils_common.retryQuery(
            'table index data %s.%s' % (db, table),
            r.db(db).table(table).index_status(),
            runOptions={'binary_format': 'raw'})

        with open(os.path.join(directory, db, table + '.info'),
                  'w') as info_file:
            info_file.write(json.dumps(table_info) + "\n")

        sindex_counter.value += len(table_info["indexes"])

        # -- start the writer

        task_queue = SimpleQueue()
        writer = None
        if format == "json":
            filename = directory + "/%s/%s.json" % (db, table)
            writer = multiprocessing.Process(target=json_writer,
                                             args=(filename, fields,
                                                   task_queue, error_queue,
                                                   format))
        elif format == "csv":
            filename = directory + "/%s/%s.csv" % (db, table)
            writer = multiprocessing.Process(target=csv_writer,
                                             args=(filename, fields, delimiter,
                                                   task_queue, error_queue))
        elif format == "ndjson":
            filename = directory + "/%s/%s.ndjson" % (db, table)
            writer = multiprocessing.Process(target=json_writer,
                                             args=(filename, fields,
                                                   task_queue, error_queue,
                                                   format))
        else:
            raise RuntimeError("unknown format type: %s" % format)
        writer.start()

        # -- read in the data source

        # -

        lastPrimaryKey = None
        read_rows = 0
        cursor = utils_common.retryQuery(
            'initial cursor for %s.%s' % (db, table),
            r.db(db).table(table).order_by(index=table_info["primary_key"]),
            runOptions={
                "time_format": "raw",
                "binary_format": "raw"
            })
        while not exit_event.is_set():
            try:
                for row in cursor:
                    # bail on exit
                    if exit_event.is_set():
                        break

                    # add to the output queue
                    task_queue.put([row])
                    lastPrimaryKey = row[table_info["primary_key"]]
                    read_rows += 1

                    # Update the progress every 20 rows
                    if read_rows % 20 == 0:
                        progress_info[0].value = read_rows

                else:
                    # Export is done - since we used estimates earlier, update the actual table size
                    progress_info[0].value = read_rows
                    progress_info[1].value = read_rows
                    break

            except (r.ReqlTimeoutError, r.ReqlDriverError) as e:
                # connection problem, re-setup the cursor
                try:
                    cursor.close()
                except Exception:
                    pass
                cursor = utils_common.retryQuery(
                    'backup cursor for %s.%s' % (db, table),
                    r.db(db).table(table).between(
                        lastPrimaryKey, None, left_bound="open").order_by(
                            index=table_info["primary_key"]),
                    runOptions={
                        "time_format": "raw",
                        "binary_format": "raw"
                    })

    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message),
                         traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Example #17
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     gvf2main: mp.SimpleQueue,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # read_all = [0x02,  # read
    #             0x00,  # starting from the beginning
    #             0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # send action and features to GVFs
            gvf_data = (obs, active_pts)
            main2gvf.put(gvf_data)

            # get action control GVFs
            action = gvf2main.get()

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Example #18
class BeerLog():
    """BeerLog main class.

  Attributes:
    nfc_reader(bnfc.base): the BeerNFC object.
  """
    def __init__(self):
        self.nfc_reader = None
        self.ui = None
        self.db = None
        self._capture_command = None
        self._database_path = None
        self._events_queue = SimpleQueue()
        self._fake_nfc = False
        self._known_tags = None
        self._last_taken_picture = None
        self._picture_dir = None
        self._should_beep = None

        self._timers = []

    def InitNFC(self, path=None):
        """Initializes the NFC reader.

    Args:
      path(str): the optional path to the device.
    """
        if self._fake_nfc:
            self.nfc_reader = nfc_base.FakeNFC(events_queue=self._events_queue)
        else:
            self.nfc_reader = nfc_base.BeerNFC(events_queue=self._events_queue,
                                               should_beep=self._should_beep,
                                               path=path)
        self.nfc_reader.process.start()
        logging.debug('Started NFC {0!s}'.format(self.nfc_reader))

    def ParseArguments(self):
        """Parses arguments.

    Returns:
      argparse.NameSpace: the parsed arguments.
    """

        parser = argparse.ArgumentParser(description='BeerLog')
        parser.add_argument('--nobeep',
                            dest='should_beep',
                            action='store_false',
                            default=True,
                            help='Disable beeping of the NFC reader')
        parser.add_argument(
            '--capture',
            dest='capture_command',
            action='store',
            help=('Picture capture command. Output filename will be appended. '
                  'Example: "fswebcam -r 1280x720. -S 10"'))
        parser.add_argument('-d',
                            '--debug',
                            dest='debug',
                            action='store_true',
                            help='Debug mode')
        parser.add_argument(
            '--database',
            dest='database',
            action='store',
            default=os.path.join(os.path.dirname(__name__), 'beerlog.sqlite'),
            help='the path to the sqlite file, or ":memory:" for a memory db')
        parser.add_argument('--known_tags',
                            dest='known_tags',
                            action='store',
                            default='known_tags.json',
                            help='the known tags file to use')
        parser.add_argument('--dir',
                            dest='picture_dir',
                            action='store',
                            default='pics',
                            help='Where to store the pictures')
        parser.add_argument(
            '--fake_nfc',
            dest='fake_nfc',
            action='store_true',
            help='Uses a fake NFC reader that will sometimes tag things')

        args = parser.parse_args()

        self._capture_command = args.capture_command
        self._database_path = args.database
        self._known_tags = args.known_tags
        self._picture_dir = args.picture_dir
        self._should_beep = args.should_beep
        self._fake_nfc = args.fake_nfc

        if args.debug:
            logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
        else:
            logging.basicConfig(stream=sys.stdout, level=logging.INFO)

        if not os.path.isdir(args.picture_dir):
            # TODO more checks
            os.mkdir(args.picture_dir)

    def InitDB(self):
        """Initializes the BeerLogDB object."""
        self.db = beerlogdb.BeerLogDB(self._database_path)
        self.db.LoadTagsDB(self._known_tags)

    def Main(self):
        """Runs the script."""
        self.ParseArguments()
        self.InitDB()
        self.InitNFC(path="usb")
        self.InitUI()
        self.Loop()

    def InitUI(self):
        """Initialises the user interface."""
        # Only GUI for now
        self.ui = display.LumaDisplay(events_queue=self._events_queue,
                                      database=self.db)
        self.ui.Setup()
        self.ui.Update()

    def PushEvent(self, event):
        """Adds an Event object in the events queue.

    Args:
      event(events.BaseEvent): the event to push.
    """
        self._events_queue.put(event)

    def AddDelayedEvent(self, event, timeout):
        """Adds an Event object in the events queue after a delay.

    Args:
      event(events.BaseEvent): the event to push.
      timeout(int): the number of seconds after which the event will be pushed.
    """
        t = Timer(timeout, self.PushEvent, args=(event, ))
        t.start()
        self._timers.append(t)

    def ResetTimers(self):
        """Reset all timers set for timed events, cancelling the BaseEvent delivery
    to the events queue."""
        for timer in self._timers:
            timer.cancel()

    def Loop(self):
        """Main program loop.

    Looks for new events in the main program Queue and processes them.
    """
        while True:
            event = self._events_queue.get()
            if event:
                try:
                    self._HandleEvent(event)
                except Exception as e:  #pylint: disable=broad-except
                    logging.error(e)
                    err_event = events.ErrorEvent('{0!s}'.format(e))
                    self.PushEvent(err_event)

            time.sleep(0.05)
            self.ui.Update()

    def _HandleEvent(self, event):
        """Does something with an Event.

    Args:
      event(BaseEvent): the event to handle.
    Raises:
      BeerLogError: if an error is detected when handling the event.
    """
        # TODO : have a UI class of events, and let the ui object deal with them
        self.ResetTimers()
        if event.type == constants.EVENTTYPES.NFCSCANNED:
            name = self.db.GetNameFromHexID(event.uid)
            if not name:
                raise errors.BeerLogError(
                    'Could not find the corresponding name for tag id "{0!s}" '
                    'in "{1:s}"'.format(event.uid, self._known_tags))
            character = self.db.GetCharacterFromHexID(event.uid)
            self.db.AddEntry(event.uid, self._last_taken_picture)
            self.ui.machine.scan(who=name, character=character)
            self.AddDelayedEvent(events.UIEvent(constants.EVENTTYPES.ESCAPE),
                                 2)
        elif event.type == constants.EVENTTYPES.KEYUP:
            self.ui.machine.up()
        elif event.type == constants.EVENTTYPES.KEYDOWN:
            self.ui.machine.down()
        elif event.type == constants.EVENTTYPES.ESCAPE:
            self.ui.machine.back()
        elif event.type == constants.EVENTTYPES.KEYMENU1:
            self.ui.machine.back()
        elif event.type == constants.EVENTTYPES.ERROR:
            self.ui.machine.error(error=str(event))
        else:
            err_msg = 'Unknown Event: {0!s}'.format(event)
            print(err_msg)
            self.PushEvent(events.ErrorEvent(err_msg))
            #self.AddDelayedEvent(UIEvent(constants.EVENTTYPES.ESCAPE), 3)

        self.db.Close()

    def TakePicture(self, command):
        """Takes a picture.

    Args:
      command(str): command to be run after a filename is appended to it.

    Returns:
      str: the path to the (hopefully created) picture, or None if no command
        was passed.
    """
        if not command:
            return None

        filepath = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S.jpg')
        cmd = '{0} "{1}"'.format(command,
                                 os.path.join(self._picture_dir, filepath))
        logging.debug('Running {0}'.format(cmd))
        subprocess.call(cmd, shell=True)  # cmd already includes the quoted output path

        return filepath
Example #19
class RunServer(Process):
    """Run a given command until a given line is found in the output."""

    def __init__(self, cmd=None, waitForLine='Waitress serving Webware'):
        super().__init__()
        self.cmd = cmd or ['webware', 'serve']
        self.waitForLine = waitForLine
        self.outputQueue = SimpleQueue()
        self.pollQueue = Queue()
        self.stopQueue = SimpleQueue()

    def getOutput(self):
        output = []
        while not self.outputQueue.empty():
            output.append(self.outputQueue.get())
        return output

    def getExitCode(self, timeout=10):
        try:
            return self.pollQueue.get(timeout=timeout)
        except Empty:
            self.stop()
            return f'timeout - {self.waitForLine!r} not found'

    def stop(self, timeout=10):
        if not self.is_alive():
            return
        # Because on Windows serve.exe starts another Python process,
        # we need to make sure the grandchildren are stopped as well.
        children = psutil.Process(self.pid).children(recursive=True)
        for p in children:
            try:
                p.terminate()
            except psutil.NoSuchProcess:
                pass
        children = psutil.wait_procs(children, timeout=timeout)[1]
        for p in children:
            try:
                p.kill()
            except psutil.NoSuchProcess:
                pass

    def run(self):
        # Because on Windows serve.exe starts another Python process,
        # we need to make sure that process runs in unbuffered mode as well.
        environ['PYTHONUNBUFFERED'] = '1'
        with Popen(self.cmd, bufsize=1, universal_newlines=True,
                   encoding='utf-8', stdout=PIPE, stderr=STDOUT) as p:
            outputStarted = False
            while True:
                ret = p.poll()
                if ret is not None:
                    break
                line = p.stdout.readline().rstrip()
                if line:
                    outputStarted = True
                if outputStarted:
                    self.outputQueue.put(line)
                    if line.startswith(self.waitForLine):
                        break
            self.pollQueue.put(ret)
            if ret is None:
                while True:
                    ret = p.poll()
                    if ret is not None:
                        break
                    line = p.stdout.readline().rstrip()
                    self.outputQueue.put(line)
            self.pollQueue.put(ret)
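
A plausible way to drive this helper from a test, inferred from the methods above rather than taken from the project (it assumes the webware serve command exists on PATH and that the class and its imports are in scope):

if __name__ == '__main__':
    server = RunServer()
    server.start()                    # run() executes in a child process
    exit_code = server.getExitCode()  # blocks until the wait-for line appears or times out
    print(exit_code)
    print('\n'.join(server.getOutput()))
    server.stop()
    server.join()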
Example #20
def main():
    args = parse_args()
    video_path = args.video_path
    weight_path = args.weight_path
    if pipeline:
        input_queue = JoinableQueue()
        pre_process = Process(target=pre_processor,
                              args=((input_queue, video_path), ))
        pre_process.start()
        output_queue = SimpleQueue()
        post_process = Process(target=post_processor,
                               args=((output_queue, args.visualize), ))
        post_process.start()
    else:
        cap = cv2.VideoCapture(video_path)
    save_dict = torch.load(weight_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()
    net.cuda()
    while True:
        if pipeline:
            loop_start = time.time()
            x = input_queue.get()
            input_queue.task_done()
            gpu_start = time.time()
            seg_pred, exist_pred = network(net, x)
            gpu_end = time.time()
            output_queue.put((x, seg_pred, exist_pred))
            loop_end = time.time()
        else:
            if not cap.isOpened():
                break
            ret, frame = cap.read()
            if ret:
                loop_start = time.time()
                frame = transform_img({'img': frame})['img']
                img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                x = transform_to_net({'img': img})['img']
                x.unsqueeze_(0)
                gpu_start = time.time()
                seg_pred, exist_pred = network(net, x)
                gpu_end = time.time()
                seg_pred = seg_pred.numpy()[0]
                exist_pred = exist_pred.numpy()
                exist = [1 if exist_pred[0, i] > 0.5 else 0 for i in range(4)]
                i2i2 = []

                #change
                for i in getLane.prob2lines_CULane(seg_pred, exist):
                    i2i2 += i
                    pass
                loop_end = time.time()
                if args.visualize:
                    img = visualize(img, seg_pred, exist_pred)
                    cv2.imshow('input_video', frame)
                    cv2.imshow("output_video", img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break
        print("gpu_runtime:", gpu_end - gpu_start, "FPS:",
              int(1 / (gpu_end - gpu_start)))
        print("total_runtime:", loop_end - loop_start, "FPS:",
              int(1 / (loop_end - loop_start)))
    cv2.destroyAllWindows()
Example #21
    def run(self,
            tasks,
            render,
            update,
            render_args=(),
            render_kwargs={},
            update_args=(),
            update_kwargs={}):

        # establish ipc queues
        job_queue = SimpleQueue()
        result_queue = SimpleQueue()
        tasks_per_job = Value('i')

        # start process to generate jobs
        tasks_per_job.value = self._tasks_per_job
        producer = Process(target=self._producer,
                           args=(tasks, job_queue, tasks_per_job))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker,
                        args=(render, render_args, render_kwargs, job_queue,
                              result_queue))
            p.start()
            workers.append(p)

        # consume results
        remaining = len(tasks)
        while remaining:

            results = result_queue.get()

            # has a worker failed?
            if isinstance(results, Exception):

                # clean up
                for worker in workers:
                    if worker.is_alive():
                        worker.terminate()
                producer.terminate()

                # wait for processes to terminate
                for worker in workers:
                    worker.join()
                producer.join()

                # raise the exception to inform the user
                raise results

            # update state with new results
            for result in results:
                update(result, *update_args, **update_kwargs)
                remaining -= 1

        # shutdown workers
        for _ in workers:
            job_queue.put(None)

        # store tasks per job value for next run
        self._tasks_per_job = tasks_per_job.value
Example #22
File: _export.py Project: AtnNn/rethinkdb
def export_table(db, table, directory, options, error_queue, progress_info, sindex_counter, hook_counter, exit_event):
    signal.signal(signal.SIGINT, signal.SIG_DFL) # prevent signal handlers from being set in child processes
    
    writer = None

    try:
        # -- get table info
        
        table_info = options.retryQuery('table info: %s.%s' % (db, table), query.db(db).table(table).info())
        
        # Rather than just the index names, store all index information
        table_info['indexes'] = options.retryQuery(
            'table index data %s.%s' % (db, table),
            query.db(db).table(table).index_status(),
            runOptions={'binary_format':'raw'}
        )
        
        sindex_counter.value += len(table_info["indexes"])

        table_info['write_hook'] = options.retryQuery(
            'table write hook data %s.%s' % (db, table),
            query.db(db).table(table).get_write_hook(),
            runOptions={'binary_format':'raw'})

        if table_info['write_hook'] != None:
            hook_counter.value += 1

        with open(os.path.join(directory, db, table + '.info'), 'w') as info_file:
            info_file.write(json.dumps(table_info) + "\n")
        with sindex_counter.get_lock():
            sindex_counter.value += len(table_info["indexes"])
        # -- start the writer
        task_queue = SimpleQueue()
        writer = None
        if options.format == "json":
            filename = directory + "/%s/%s.json" % (db, table)
            writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
        elif options.format == "csv":
            filename = directory + "/%s/%s.csv" % (db, table)
            writer = multiprocessing.Process(target=csv_writer, args=(filename, options.fields, options.delimiter, task_queue, error_queue))
        elif options.format == "ndjson":
            filename = directory + "/%s/%s.ndjson" % (db, table)
            writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
        else:
            raise RuntimeError("unknown format type: %s" % options.format)
        writer.start()
        
        # -- read in the data source
        
        # - 
        
        lastPrimaryKey = None
        read_rows      = 0
        runOptions     = {
            "time_format":"raw",
            "binary_format":"raw"
        }
        if options.outdated:
            runOptions["read_mode"] = "outdated"
        cursor = options.retryQuery(
            'initial cursor for %s.%s' % (db, table),
            query.db(db).table(table).order_by(index=table_info["primary_key"]),
            runOptions=runOptions
        )
        while not exit_event.is_set():
            try:
                for row in cursor:
                    # bail on exit
                    if exit_event.is_set():
                        break
                    
                    # add to the output queue
                    task_queue.put([row])
                    lastPrimaryKey = row[table_info["primary_key"]]
                    read_rows += 1
                    
                    # Update the progress every 20 rows
                    if read_rows % 20 == 0:
                        progress_info[0].value = read_rows
                
                else:
                    # Export is done - since we used estimates earlier, update the actual table size
                    progress_info[0].value = read_rows
                    progress_info[1].value = read_rows
                    break
            
            except (errors.ReqlTimeoutError, errors.ReqlDriverError) as e:
                # connection problem, re-setup the cursor
                try:
                    cursor.close()
                except Exception: pass
                cursor = options.retryQuery(
                    'backup cursor for %s.%s' % (db, table),
                    query.db(db).table(table).between(lastPrimaryKey, None, left_bound="open").order_by(index=table_info["primary_key"]),
                    runOptions=runOptions
                )
    
    except (errors.ReqlError, errors.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Example #23
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
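
This class is essentially an older version of the standard library's concurrent.futures.ProcessPoolExecutor, with the SimpleQueue carrying results and None wake-up tokens back to the queue-management thread. Typical use of the public API looks like this; the snippet is a generic illustration, not drawn from the surrounding example.

from concurrent.futures import ProcessPoolExecutor, as_completed

def cube(x):
    return x ** 3

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=4) as pool:
        futures = [pool.submit(cube, n) for n in range(10)]
        for future in as_completed(futures):
            print(future.result())
    # the with-block calls shutdown(wait=True) on exit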
Example #24
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(
        signal.SIGINT,
        lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue,
                                  client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(
                multiprocessing.Process(target=client_process,
                                        args=(options["host"], options["port"],
                                              options["auth_key"], task_queue,
                                              error_queue, rows_written,
                                              options["force"],
                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((
                multiprocessing.Value(ctypes.c_longlong,
                                      -1),  # Current lines/bytes processed
                multiprocessing.Value(ctypes.c_longlong,
                                      0)))  # Total lines/bytes to process
            reader_procs.append(
                multiprocessing.Process(target=table_reader,
                                        args=(options, file_info, task_queue,
                                              error_queue, progress_info[-1],
                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [
                client for client in client_procs if client.is_alive()
            ]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(
            rows_written.value, "row"), plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]),
                      file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Example #25
0
class CozmoMqttProgram():
    def __init__(self) -> None:
        self._cozmo = cozmo_client.Cozmo()
        self._queue = SimpleQueue()
        self._mqtt_client = None
        if MQTT_BROKER_URL is not None:
            self._mqtt_client = mqtt_client.MqttClient(
                MQTT_BROKER_URL, MQTT_BROKER_PORT, MQTT_USERNAME,
                MQTT_PASSWORD, MQTT_TOPICS, self._on_mqtt_message)
        self.sdk_conn: CozmoConnection = None
        self._faces: Dict[Face, datetime] = dict()
        self._visible_objects: Dict[ObservableObject, datetime] = dict()
        self._message_manager = MessageManager()
        self._cozmo_state = CozmoStates.Disconnected

    @property
    def cozmo_state(self) -> CozmoStates:
        return self._cozmo_state

    @cozmo_state.setter
    def cozmo_state(self, state: CozmoStates) -> None:
        self._cozmo_state = state
        self._publish_cozmo_state()

    async def run_with_robot_async(self, robot: cozmo.robot.Robot) -> None:
        self.sdk_conn = robot.world.conn
        await self._run_async(robot)

    async def _run_async(self, robot: cozmo.robot.Robot) -> None:
        await self._initialize_async(robot)
        try:
            while self.sdk_conn.is_connected:
                self._cozmo.update_needs_level()
                if self._cozmo.needs_charging(
                ) and not self._cozmo.is_sleeping:
                    await self._charge_cycle()
                    await self._cozmo.wake_up_async()
                    self._cozmo_freetime()

                if not self._queue.empty():
                    await self._handle_queue_async()

                if self._cozmo.world.visible_face_count() > 0:
                    face = self._get_visible_face()
                    if face:
                        if face in self._faces:
                            last_seen = self._faces[face]
                            if (datetime.now() -
                                    last_seen).total_seconds() > 60:
                                await self._cozmo_do_async(
                                    self._on_saw_face(face))
                        else:
                            await self._cozmo_do_async(self._on_saw_face(face))

                if self._cozmo.robot.is_picked_up:
                    await self._cozmo_do_async(self._on_picked_up_async())

                if self._cozmo.robot.is_cliff_detected:
                    await self._cozmo_do_async(self._on_cliff_detected_async())

                if self._cozmo.world.visible_object_count(
                        object_type=ObservableObject) > 0:
                    visible_object = self._get_visible_object()
                    if visible_object:
                        if visible_object in self._visible_objects:
                            last_seen = self._visible_objects[visible_object]
                            if (datetime.now() -
                                    last_seen).total_seconds() > 60 * 5:
                                await self._cozmo_do_async(
                                    self._on_new_object_appeared_async(
                                        visible_object))
                        else:
                            await self._cozmo_do_async(
                                self._on_new_object_appeared_async(
                                    visible_object))

                await asyncio.sleep(0.1)
        except:
            print("Unexpected error:", sys.exc_info()[0])

        await self.terminate_async()

    async def _initialize_async(self, robot: cozmo.robot.Robot) -> None:
        self._observe_connection_lost(self.sdk_conn, self._on_connection_lost)
        self._cozmo.set_robot(robot)
        await asyncio.gather(self._cozmo.connect_to_cubes_async(),
                             self._cozmo.get_off_charger_async())
        if self._mqtt_client is not None:
            await self._mqtt_client.connect_async()
        self.cozmo_state = CozmoStates.Connected
        self._cozmo_freetime()

    async def terminate_async(self) -> None:
        print("Terminating")
        if self._mqtt_client is not None:
            await self._mqtt_client.disconnect_async()
        if self.sdk_conn.is_connected:
            print("Sending cozmo back to charger")
            await self._cozmo.stop_all_actions_async()
            self._cozmo.back_to_normal()
            await self._cozmo.get_on_charger_async()

    def _observe_connection_lost(self, connection: CozmoConnection, cb):
        meth = connection.connection_lost

        @functools.wraps(meth)
        def connection_lost(self, exc):
            meth(exc)
            cb()

        connection.connection_lost = types.MethodType(connection_lost,
                                                      connection)

    def _on_connection_lost(self) -> None:
        print("Captured connection lost")
        self.cozmo_state = CozmoStates.ConnectionLost

    def _publish_cozmo_state(self) -> None:
        if self._mqtt_client is not None:
            payload = dict()
            payload["status"] = self.cozmo_state.value
            attributes = dict()
            if self._cozmo.robot:
                attributes["battery_voltage"] = self._cozmo.battery_voltage
            payload["attributes"] = attributes
            self._mqtt_client.publish(COZMO_MQTT_PUBLISHING_TOPIC, payload)

    async def _on_saw_face(self, face: Face) -> None:
        self._faces[face] = datetime.now()
        self.cozmo_state = CozmoStates.SawFace
        print("An face appeared: {}".format(face))
        if face.name:
            await self._cozmo.turn_toward_face_async(face)
            message = self._message_manager.get_hello_message(face)
            await self._cozmo.random_positive_anim_async()
            await self._cozmo.say_async(message)
            if face.known_expression:
                message = self._message_manager.get_fece_expression_message(
                    face.known_expression, face)
                await self._cozmo.say_async(message)
        else:
            message = self._message_manager.get_non_recognized_message(face)
            await self._cozmo.say_async(message)

    async def _on_picked_up_async(self) -> None:
        print("Cozmo was picked up")
        self.cozmo_state = CozmoStates.PickedUp
        face = self._get_visible_face()
        message = self._message_manager.get_picked_up_message(face)
        await self._cozmo.random_positive_anim_async()
        if face:
            await self._cozmo.display_camera_image_async()
        await self._cozmo.say_async(message)
        while self._cozmo.robot.is_picked_up:
            await asyncio.sleep(0.1)
        print("Cozmo was put down")

    async def _on_cliff_detected_async(self) -> None:
        print("Cozmo detected a cliff")
        self.cozmo_state = CozmoStates.OnCliff
        self._cozmo.stop()
        self._cozmo.clear_current_animations()
        await self._cozmo.drive_wheels_async(-40, 1)
        face = self._get_visible_face()
        message = self._message_manager.get_cliff_detected_message(face)
        await self._cozmo.random_negative_anim_async()
        await self._cozmo.say_async(message)
        while self._cozmo.robot.is_cliff_detected:
            await asyncio.sleep(0.1)
        print("Cozmo away from cliff")

    async def _on_new_object_appeared_async(
            self, visible_object: ObservableObject) -> None:
        self._visible_objects[visible_object] = datetime.now()
        print("An obbject appeared: {}".format(visible_object))
        face = self._get_visible_face()
        message = self._message_manager.get_object_appeared_message(
            visible_object, face)
        await self._cozmo.say_async(message)

    def _get_visible_face(self) -> Face:
        if self._cozmo.world.visible_face_count() == 0:
            print("Found no visibile faces")
            return None

        visible_face = next((face for face in self._cozmo.world.visible_faces),
                            None)
        return visible_face

    def _get_visible_object(self) -> ObservableObject:
        if self._cozmo.world.visible_object_count(
                object_type=ObservableObject) == 0:
            print("Found no visibile objects")
            return None

        visible_obj = next((obj for obj in self._cozmo.world.visible_objects),
                           None)
        return visible_obj

    def _cozmo_freetime(self) -> None:
        self._cozmo.start_free_time()
        self.cozmo_state = CozmoStates.Freetime

    async def _cozmo_do_async(self, async_f: Awaitable) -> None:
        if self._cozmo.freetime_enabled:
            self._cozmo.stop_free_time()
        try:
            await async_f
            self._cozmo_freetime()
        except cozmo.RobotBusy:
            print("Task Exception...cozmo is Busy")

    async def _charge_cycle(self) -> None:
        self.cozmo_state = CozmoStates.GoingToCharge
        print("Cozmo needs charging. Battery level {}".format(
            self._cozmo.battery_voltage))
        await self._cozmo.start_charging_routine_async()
        self.cozmo_state = CozmoStates.Charging
        await self._cozmo.charge_to_full_async()
        print("Cozmo charged")

    # MQTT Queue Related-------------------------------------------------------------------------------------------------------------------
    def _on_mqtt_message(self, client, topic, payload, qos,
                         properties) -> None:
        try:
            json_data = json.loads(payload.decode('utf-8'))
            print("Topic: {}".format(topic))
            print("Data: {}".format(json_data))
            topic_data_tuple = (topic, json_data)
            self._queue.put(topic_data_tuple)
        except:
            print("Unexpected error:", sys.exc_info()[0])

    async def _handle_queue_async(self) -> None:
        if not self._queue.empty():
            print("Cozmo processing queue")
            await self._cozmo_do_async(
                self._process_message_async(self._queue.get()))
            await self._handle_queue_async()

    async def _process_message_async(self, topic_data_tuple: tuple) -> None:
        topic = topic_data_tuple[0]
        json_data = topic_data_tuple[1]
        if topic == MQTT_WEATHER_TOPIC:
            await self._process_weather_notification_async(json_data)
        elif topic == MQTT_CONTROL_TOPIC:
            await self._process_control_msg_async(json_data)

    async def _process_control_msg_async(self, json_data: dict) -> None:
        if "msg" in json_data:
            msg = json_data["msg"]
            if msg == 'sleep':
                asyncio.create_task(self._cozmo.sleep_async())
            if msg == 'freetime':
                await self._cozmo.wake_up_async()
                self._cozmo_freetime()

    async def _process_weather_notification_async(self,
                                                  json_data: dict) -> None:
        if "msg" in json_data:
            msg = json_data["msg"]
            image_url = None
            color = None
            title = "I have a weather update notification for you."
            if "imagePath" in json_data:
                image_url = json_data["imagePath"]
            if 'clear' in msg:
                print("Clear outside!")
                color = YELLOW
            elif 'cloudy' in msg:
                print("Cloudy outside!")
                color = SLATE_GRAY
            await self._cozmo_announce_weather_update_async(
                msg, title, color, image_url)

    async def _cozmo_announce_weather_update_async(
            self,
            msg: str,
            title: str,
            rgb: Union[Tuple, None] = None,
            image_url: str = None) -> None:
        self.cozmo_state = CozmoStates.Anouncing
        if rgb:
            light = Light(Color(rgb=rgb)).flash()
            self._cozmo.cubes_change_lights(light)
            self._cozmo.backpack_change_light(light)
        await self._cozmo.random_positive_anim_async()
        await self._cozmo.say_async(title)
        await self._cozmo.say_async(msg)
        if image_url:
            await self._cozmo.show_image_from_url_async(image_url)
        if rgb:
            self._cozmo.turn_cubes_lights_off()
            self._cozmo.turn_backpack_light_off()
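
The class above hands MQTT messages from the client's callback thread to the asyncio loop through a queue that is polled between robot updates. A stripped-down sketch of that handoff, using placeholder names (incoming, on_message, consume_forever) rather than the Cozmo-specific ones:

import asyncio
import json
from queue import SimpleQueue

incoming = SimpleQueue()

def on_message(topic: str, payload: bytes) -> None:
    # runs on the MQTT client's thread; it only touches the thread-safe queue
    incoming.put((topic, json.loads(payload.decode("utf-8"))))

async def consume_forever() -> None:
    while True:
        while not incoming.empty():
            topic, data = incoming.get()
            print("handling", topic, data)   # dispatch to a handler here
        await asyncio.sleep(0.1)             # same polling interval as the loop above
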
Example #26
0
File: client.py Project: ATRAN2/Futami
class InternalClient(Client):
    """This client is a fake client which is responsible for firing off
    all messages from the update notification side, and handling the
    routing of those messages to users watching.

    It does not have a socket, so it should not be included in the
    server's clients dictionary.
    """

    def __init__(self, server, nickname, user, host='localhost'):
        self.server = server
        self.nickname = nickname
        self.realname = nickname
        self.user = user
        self.host = host

        self._readbuffer = ""
        self._writebuffer = ""
        self.request_queue = SimpleQueue()
        self.response_queue = SimpleQueue()

        # dict of board => list of users
        self.board_watchers = defaultdict(list)

        # dict of board, thread => list of users
        self.thread_watchers = defaultdict(lambda: defaultdict(list))

        Process(
            target=Ami,
            name='immediate api worker',
            args=(self.request_queue, self.response_queue)
        ).start()

    def loop_hook(self):
        while not self.response_queue.empty():
            result = self.response_queue.get()

            # Handle exceptions in-band from child workers here.
            if isinstance(result, StoredException):
                print(result.traceback)
                raise RuntimeError(
                    "Exception caught from worker '{}', see above for exception details".format(
                        result.process,
                ))

            logger.debug("read from response queue {}".format(result))

            send_as = "/{}/{}".format(result.board, result.post_no)

            # Initial channel loads have identifiers, use them to find out
            # where to go
            if result.identifier:
                client, channel, target = result.identifier
                client = self.server.get_client(client)
                logger.debug("initial channel load, using identitifier info: sending to {} on {}".format(client, channel))

                if isinstance(target, BoardTarget):
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )
                    continue
                elif isinstance(target, ThreadTarget):
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
                    continue

            if result.is_reply:  # Send to thread channel
                channel = "#/{}/{}".format(result.board, result.reply_to)
                logger.debug("sending reply to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.thread_watchers[result.board][result.reply_to]:
                    logger.debug("sending reply to {}".format(client))
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
            else:
                channel = "#/{}/".format(result.board)
                logger.debug("sending thread update to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.board_watchers[result.board]:
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )

    def _parse_prefix(self, prefix):
        m = re.search(
            ":(?P<nickname>[^!]*)!(?P<username>[^@]*)@(?P<host>.*)",
            prefix
        )
        return m.groupdict()

    @property
    def socket(self):
        raise AttributeError('InternalClients have no sockets')

    def message(self, message):
        pass
        # prefix, message = message.split(" ", 1)

        # prefix = self._parse_prefix(prefix)

        # self.sending_client = self.server.get_client(prefix['nickname'])

        # self._readbuffer = message + '\r\n'
        # self._parse_read_buffer()

    def client_joined(self, client, channel):
        logger.debug("InternalClient handling {} joined {}".format(client, channel))

        channel_registration_map = {
            r'#/(.+)/$': self._client_register_board,
            r'#/(.+)/(\d+)$': self._client_register_thread,
        }

        matched_registration = False

        for regex, register_method in channel_registration_map.items():
            m = re.match(regex, channel.name)
            if m:
                register_method(client, channel, *m.groups())
                matched_registration = True
                break

        if not matched_registration:
            self._send_message(
                client, channel.name,
                "This channel ({}) doesn't look like a board. Nothing will happen in this channel.".format(channel.name)
            )
            return

    def _handle_command(self, command, arguments):
        # sending_client = self.sending_client
        # self.sending_client = None

        # Add handling here for actual input from users other than joins
        pass

    def _client_register_board(self, client, channel, board):
        logger.debug("registering to board: {}, {}, {}".format(client, channel, board))

        slash_board = '/{}/'.format(board)
        self._send_message(
            client, channel.name,
            "Welcome to {}, loading threads...".format(slash_board),
            sending_nick=slash_board,
        )

        target = BoardTarget(board)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        self.board_watchers[board].append(client)

    def _client_register_thread(self, client, channel, board, thread):
        logging.debug("registering to thread: {}, {}, {}, {}".format(client, channel, board, thread))

        slash_board_thread = '/{}/{}'.format(board, thread)

        self._send_message(
            client, channel.name,
            "Welcome to >>>{}, loading posts...".format(slash_board_thread),
            sending_nick=slash_board_thread,
        )

        target = ThreadTarget(board, thread)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        # Thread reply_tos are ints when they come back from the API
        self.thread_watchers[board][int(thread)].append(client)

    def _send_message(self, client, channel, message, sending_nick=None):
        if sending_nick:
            real_nick = self.nickname
            self.nickname = sending_nick

        client.message(
            ":{} PRIVMSG {} :{}".format(
                self.prefix,
                channel,
                message,
            )
        )

        if sending_nick:
            self.nickname = real_nick
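
Note the Process(target=Ami, ...) call above: the worker's target is a class, so instantiating it in the child process is what runs the work. A small sketch of the same request/response queue pair, with a made-up ApiWorker standing in for Ami:

from multiprocessing import Process, SimpleQueue

class ApiWorker:
    def __init__(self, requests, responses):
        # instantiated inside the child process; loops until it receives None
        while True:
            req = requests.get()
            if req is None:
                break
            responses.put({"echo": req})   # stand-in for a real API call

if __name__ == "__main__":
    requests, responses = SimpleQueue(), SimpleQueue()
    Process(target=ApiWorker, args=(requests, responses), name="api worker").start()
    requests.put({"board": "g"})
    print(responses.get())                 # {'echo': {'board': 'g'}}
    requests.put(None)                     # tell the worker to stop
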
Example #27
0
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        if self.n_jobs == 1:
            blocks_computed = 0
            blocks_all = len(np.unique(blocks))

            for block in self._blocks(X, y, blocks):
                if self.partial_fit_ and block[0] in self.clusterers_:
                    data = (block, self.clusterers_[block[0]])
                else:
                    data = (block, None)

                b, clusterer = _single_fit(self.fit_, self.partial_fit_,
                                           self.base_estimator, self.verbose,
                                           data)

                if clusterer:
                    self.clusterers_[b] = clusterer

                if blocks_computed < blocks_all:
                    print("%s blocks computed out of %s" %
                          (blocks_computed, blocks_all))
                blocks_computed += 1
        else:
            try:
                from multiprocessing import SimpleQueue
            except ImportError:
                from multiprocessing.queues import SimpleQueue

            # Here the blocks will be passed to subprocesses
            data_queue = SimpleQueue()
            # Here the results will be passed back
            result_queue = SimpleQueue()

            import multiprocessing as mp

            processes = []
            for x in range(self.n_jobs):
                processes.append(
                    mp.Process(target=_parallel_fit,
                               args=(self.fit_, self.partial_fit_,
                                     self.base_estimator, self.verbose,
                                     data_queue, result_queue)))
                processes[-1].start()

            # First n_jobs blocks are sent into the queue without waiting
            # for the results. This variable is a counter that takes care of
            # this.
            presend = 0
            blocks_computed = 0
            blocks_all = len(np.unique(blocks))

            for block in self._blocks(X, y, blocks):
                if presend >= self.n_jobs:
                    b, clusterer = result_queue.get()
                    blocks_computed += 1
                    if clusterer:
                        self.clusterers_[b] = clusterer
                else:
                    presend += 1
                if self.partial_fit_:
                    if block[0] in self.clusterers_:
                        data_queue.put(('middle', block, self.clusterers_[block[0]]))
                        continue

                data_queue.put(('middle', block, None))

            # Get the last results and tell the subprocesses to finish
            for x in range(self.n_jobs):
                if blocks_computed < blocks_all:
                    print("%s blocks computed out of %s" %
                          (blocks_computed, blocks_all))
                    b, clusterer = result_queue.get()
                    blocks_computed += 1
                    if clusterer:
                        self.clusterers_[b] = clusterer

            data_queue.put(('end', None, None))

            time.sleep(1)

        return self
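
The parallel branch above keeps at most n_jobs blocks in flight: the first n_jobs blocks are pre-sent, and after that every new block first collects one finished result. A simplified, self-contained sketch of that pipelining; the per-block work is a placeholder, not the real clusterer fit:

from multiprocessing import Process, SimpleQueue

def fit_worker(data_queue, result_queue):
    while True:
        tag, block = data_queue.get()
        if tag == "end":
            data_queue.put(("end", None))          # let sibling workers see the sentinel too
            break
        result_queue.put((block, block * block))   # stand-in for fitting one block

def fit_blocks(blocks, n_jobs=2):
    data_queue, result_queue = SimpleQueue(), SimpleQueue()
    workers = [Process(target=fit_worker, args=(data_queue, result_queue))
               for _ in range(n_jobs)]
    for w in workers:
        w.start()
    results, presend = {}, 0
    for block in blocks:
        if presend >= n_jobs:                      # pipeline is full: wait for one result
            b, fitted = result_queue.get()
            results[b] = fitted
        else:
            presend += 1
        data_queue.put(("middle", block))
    for _ in range(presend):                       # drain the blocks still in flight
        b, fitted = result_queue.get()
        results[b] = fitted
    data_queue.put(("end", None))
    for w in workers:
        w.join()
    return results                                 # e.g. fit_blocks(range(10)) -> squares
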
Example #28
0
class QueuePool(object):
	Process = QueueProcess

	def __init__(self, callback, pool_size=1, check_intervall=2):
		self.task_queue = SimpleQueue()
		self.result_queue = SimpleQueue()
		self._callback = callback
		self._pool = {}  # {process_name: process}
		self._tasks = {}  # {task_id: process_name}
		for _ in range(pool_size):
			process = self.Process(self.task_queue, self.result_queue)
			self._pool[process.name] = process
			process.start()
		# Check for progress periodically TODO: stop timer when queue is empty!
		self.timer = QTimer()
		self.timer.timeout.connect(self._check_for_results)
		self.timer.start(check_intervall * 1000)

	def _check_for_results(self):
		while not self.result_queue.empty():
			process_name, task_id, result_object, is_exception, is_ready = self.result_queue.get()
			if is_ready or is_exception:
				if task_id in self._tasks:
					del self._tasks[task_id]
			else:
				self._tasks[task_id] = process_name
			self._callback(task_id, result_object, is_exception, is_ready)

	def change_check_interval(self, new_interval_in_seconds):
		try:
			interval = float(new_interval_in_seconds)
		except ValueError:
			return
		self.timer.stop()
		self.timer.start(interval * 1000)

	def change_pool_size(self, new_pool_size):
		try:
			diff = int(new_pool_size) - len(self._pool)
		except ValueError:
			return
		if diff < 0:
			for _ in range(abs(diff)):
				process_name, process = self._pool.popitem()
				process.soft_interrupt.set()
		else:
			for _ in range(diff):
				process = QueueProcess(self.task_queue, self.result_queue, function=compute)
				self._pool[process.name] = process
				process.start()

	def add_task(self, task_id, *params):
		self.task_queue.put([task_id] + list(params))

	def cancel_task(self, task_id):
		process_name = self._tasks.get(task_id)
		if process_name is None:
			# task is not active, but it may still be waiting in task_queue, from which it should be removed
			task_objects = []
			while not self.task_queue.empty():
				task_objects.append(self.task_queue.get())
			for obj in task_objects:
				if task_id != obj[0]:
					self.task_queue.put(obj)
			return
		process = self._pool.get(process_name)
		if process is None:
			# process might be already stopped -> ignore for now
			return
		process.hard_interrupt.set()

	def shutdown(self):
		for process in self._pool.values():
			process.hard_interrupt.set()
			self.task_queue.put(None)  # unblock queue

	def terminate(self):
		for process in self._pool.values():
			if process.exitcode is None:
				process.terminate()
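
cancel_task above removes a queued task by draining the whole SimpleQueue and re-inserting everything except the cancelled id, since SimpleQueue offers no random access. That step in isolation, with made-up task tuples:

from multiprocessing import SimpleQueue

def remove_task(task_queue, task_id):
    kept = []
    while not task_queue.empty():
        kept.append(task_queue.get())
    for item in kept:
        if item[0] != task_id:        # item[0] is the task id, as in add_task above
            task_queue.put(item)

q = SimpleQueue()
q.put(["a", 1])
q.put(["b", 2])
remove_task(q, "a")
print(q.get())                        # ['b', 2]
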
Example #29
0
def ChunkEncryptionProcess(chunkEncryptionQueue: multiprocessing.SimpleQueue,
                           blobInfoQueue: multiprocessing.SimpleQueue,
                           credentials: str, bucketName: str, processId: int,
                           nUploadThreads: int, uploadQueueMiB: int) -> None:
    def readChunk(path: str,
                  offset: int,
                  length: int = 1024 * 1024 * 32) -> bytes:
        fp = open(path, 'rb')
        assert offset == fp.seek(offset)
        chunk = fp.read(length)
        fp.close()
        return chunk

    def encryptChunk(chunk: bytes) -> list:
        # Choose a random name
        name = ''.join(random.choice('0123456789abcdef') for i in range(32))

        # Get decrypted stats
        decryptedSize = len(chunk)
        decryptedSha256 = hashlib.sha256(chunk).digest()

        # Generate key and encrypt chunk
        # Discard decrypted chunk at the same time
        encryptionKey = Fernet.generate_key()
        chunk = Fernet(encryptionKey).encrypt(chunk)

        # Get encrypted stats
        encryptedSize = len(chunk)
        encryptedSha256 = hashlib.sha256(chunk).digest()

        # Generate blob info
        blobInfo = BlobInfo(name, encryptionKey, encryptedSize,
                            encryptedSha256, decryptedSize, decryptedSha256)

        return [chunk, blobInfo]

    # Start blob upload threads
    blobUploadQueueBytes = ThreadValueLock(1024 * 1024 * uploadQueueMiB)
    blobUploadQueue = queue.SimpleQueue()
    blobUploadThreads = []
    for threadId in range(nUploadThreads):
        thread = Thread(target=BlobUploadThread,
                        args=[
                            blobUploadQueue, blobUploadQueueBytes, credentials,
                            bucketName, threadId
                        ],
                        name='BlobUploadThread{}'.format(threadId))
        thread.start()
        blobUploadThreads.append(thread)

    # Create stats variable
    ticToc = TicToc()

    # Process tasks until received None
    while True:
        # Set process title
        setproctitle.setproctitle('ChunkEncryptionProcess{}'.format(processId))

        # Get task
        task = chunkEncryptionQueue.get()
        if task is None:
            break

        # Start measuring time used for encryption
        elapsed = []
        ticToc.tic()

        # Extract task
        path: str = task[0]
        offset: int = task[1]
        fileSize: int = task[2]

        # Update process title
        setproctitle.setproctitle(
            'ChunkEncryptionProcess{} {}, chunk {}/{}'.format(
                processId, path, offset // (1024 * 1024 * 32),
                math.ceil(fileSize / (1024 * 1024 * 32))))

        # Read chunk
        chunk = readChunk(path, offset)

        # Encrypt chunk
        # Discard decrypted chunk at the same time
        result: list = encryptChunk(chunk)
        chunk: bytes = result[0]
        blobInfo: BlobInfo = result[1]

        # Stop measuring time used for encryption
        elapsed.append(ticToc.toc())

        # Start measuring time used waiting for upload queue
        ticToc.tic()

        # Send encrypted chunk to blob upload threads
        blobUploadQueueBytes.acquire(len(chunk))
        blobUploadQueue.put([blobInfo.name, chunk])

        # Stop measuring time used waiting for upload queue
        elapsed.append(ticToc.toc())

        # Send blob info to blob info collection process
        blobInfoQueue.put([
            path, offset, blobInfo, elapsed,
            blobUploadQueue.qsize(),
            blobUploadQueueBytes.getValue()
        ])

    # Stop blob upload threads
    for _ in range(nUploadThreads):
        blobUploadQueue.put(None)
    for thread in blobUploadThreads:
        thread.join()
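
Each chunk above is encrypted with a freshly generated Fernet key, and that key (carried in BlobInfo) is the only way to decrypt the blob later. A minimal round-trip sketch of that step, independent of the upload machinery:

import hashlib
from cryptography.fernet import Fernet

chunk = b"example chunk bytes"
key = Fernet.generate_key()           # one key per chunk, stored alongside the blob info
token = Fernet(key).encrypt(chunk)

assert Fernet(key).decrypt(token) == chunk
print(len(token), hashlib.sha256(token).hexdigest())
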
Example #30
0
async def servo_loop(device: str, sids: Sequence[int],
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy, coder: KanervaCoder,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device, **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        # await send_msg(sr, sw, sid, [])
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    # read_data = [0x02,  # read
    #              0x24,  # starting from 0x24
    #              0x08]  # a string of 8 bytes

    read_all = [
        0x02,  # read
        0x00,  # starting from the beginning
        0x32
    ]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_all) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], list(action))

            # make feature vector
            active_pts = coder(obs=obs, byte_data=byte_data)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs, x=active_pts)

            # send action to servos
            instructions = [
                goal_instruction(a) for a in action if a is not None
            ]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            if locks:
                print('main gm a 1 a')
                gmlock.acquire()
                print('main gm a 1 b')
            main2gvf.put(gvf_data)
            if locks:
                print('main gm r 1 a')
                gmlock.release()
                print('main gm r 1 b')

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Example #31
0
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb), self._processes,
                      self._pending_work_items, self._work_ids,
                      self._call_queue, self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(target=_process_worker,
                                        args=(self._call_queue,
                                              self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool(
                    'A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
Example #32
0
def learning_loop(exit_flag: mp.Value, gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue, gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [
            g.data(xi, obs, action, xpi, obsp)
            for gs, xi, xpi in zip(gvfs, x, xp) for g in gs
        ]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
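
The loop above coordinates with the main process through a shared exit_flag (an mp.Value) and a polled SimpleQueue. A minimal sketch of that stop-flag pattern with a placeholder learner:

import time
import multiprocessing as mp

def learner(exit_flag, inbox):
    while exit_flag.value == 0:
        while exit_flag.value == 0 and inbox.empty():
            time.sleep(0.01)
        if exit_flag.value:
            break
        item = inbox.get()            # update weights with `item` here
    print('Done learning!')

if __name__ == "__main__":
    exit_flag = mp.Value('b', 0)
    inbox = mp.SimpleQueue()
    p = mp.Process(target=learner, args=(exit_flag, inbox))
    p.start()
    inbox.put(("action", 1.0, "obs", "x"))
    time.sleep(0.1)
    exit_flag.value = 1               # ask the learner to stop
    p.join()
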
Example #33
0
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb), self._processes,
                      self._pending_work_items, self._work_ids,
                      self._call_queue, self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(target=_process_worker,
                                        args=(self._call_queue,
                                              self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool(
                    'A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        if self._call_queue is not None:
            self._call_queue.close()
            if wait:
                self._call_queue.join_thread()
            self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
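
This class follows the concurrent.futures ProcessPoolExecutor API, so a typical caller only touches submit and map. A short usage sketch, written against the standard-library executor:

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as executor:
        future = executor.submit(square, 7)
        print(future.result())                                    # 49
        print(list(executor.map(square, range(5), chunksize=2)))  # [0, 1, 4, 9, 16]
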
Example #34
0
    procs = []

    if args.action in ["training", "inference"]:

        if (args.action == "inference" and
            "*" in config["model_path"] and
            "--model_path" not in more_args):
            more_args += glob_to_more_args(config["model_path"], "model_path")

        configurations = make_configs_from(config, more_args)

        target = train if args.action == "training" else run_inference

        gpu_queue = SimpleQueue()
        for idx in get_gpus():
            gpu_queue.put(str(idx))


        try:
            #blockPrint()
            for c in configurations:

                #if any(t in c["model_path"] for t in []):
                while gpu_queue.empty():
                    sleep(10)

                p = Process(target=target, args=(c, gpu_queue))
                procs.append(p)
                p.start()
                sleep(10)
Example #35
0
class DynamodbToS3Test(unittest.TestCase):
    def setUp(self):
        self.output_queue = SimpleQueue()

        def mock_upload_file(Filename, Bucket, Key):  # pylint: disable=unused-argument,invalid-name
            with open(Filename) as f:
                lines = f.readlines()
                for line in lines:
                    self.output_queue.put(json.loads(line))

        self.mock_upload_file_func = mock_upload_file

    def output_queue_to_list(self):
        items = []
        while not self.output_queue.empty():
            items.append(self.output_queue.get())
        return items

    @patch('airflow.contrib.operators.dynamodb_to_s3.S3Hook')
    @patch('airflow.contrib.operators.dynamodb_to_s3.AwsDynamoDBHook')
    def test_dynamodb_to_s3_success(self, mock_aws_dynamodb_hook,
                                    mock_s3_hook):
        responses = [
            {
                'Items': [{
                    'a': 1
                }, {
                    'b': 2
                }],
                'LastEvaluatedKey': '123',
            },
            {
                'Items': [{
                    'c': 3
                }],
            },
        ]
        table = MagicMock()
        table.return_value.scan.side_effect = responses
        mock_aws_dynamodb_hook.return_value.get_conn.return_value.Table = table

        s3_client = MagicMock()
        s3_client.return_value.upload_file = self.mock_upload_file_func
        mock_s3_hook.return_value.get_conn = s3_client

        dynamodb_to_s3_operator = DynamoDBToS3Operator(
            task_id='dynamodb_to_s3',
            dynamodb_table_name='airflow_rocks',
            s3_bucket_name='airflow-bucket',
            file_size=4000,
        )

        dynamodb_to_s3_operator.execute(context={})

        self.assertEqual([{
            'a': 1
        }, {
            'b': 2
        }, {
            'c': 3
        }], self.output_queue_to_list())
Example #36
0
class WindowRecorder:
    """Programatically video record a window in Linux (requires xwininfo)"""
    def __init__(self,
                 window_names: Iterable[AnyStr] = None,
                 frame_rate=30.0,
                 name_suffix="",
                 save_dir=None):
        if window_names is None:
            logger.info(
                "Select a window to record by left clicking with your mouse")
            output = subprocess.check_output(["xwininfo"],
                                             universal_newlines=True)
            logger.info(f"Selected {output}")
        else:
            for name in window_names:
                try:
                    output = subprocess.check_output(
                        ["xwininfo", "-name", name], universal_newlines=True)
                    break
                except subprocess.CalledProcessError:
                    logger.debug(
                        f"Could not find window named {name}, trying next in list"
                    )
            else:
                raise RuntimeError(
                    f"Could not find any windows with names from {window_names}"
                )

        properties = {}
        for line in output.split("\n"):
            if ":" in line:
                parts = line.split(":", 1)
                properties[parts[0].strip()] = parts[1].strip()

        left, top = int(properties["Absolute upper-left X"]), int(
            properties["Absolute upper-left Y"])
        width, height = int(properties["Width"]), int(properties["Height"])

        self.monitor = {
            "top": top,
            "left": left,
            "width": width,
            "height": height
        }
        self.frame_rate = frame_rate
        self.suffix = name_suffix
        self.save_dir = save_dir
        if self.save_dir is None:
            self.save_dir = cfg.CAPTURE_DIR

    def __enter__(self):
        if not os.path.exists(self.save_dir):
            raise FileNotFoundError(
                f"Trying to record to {self.save_dir}, but folder does not exist"
            )

        output = os.path.join(
            self.save_dir,
            f"{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}_{self.suffix}.mp4"
        )
        logger.debug(f"Recording video to {output}")
        self.q = SimpleQueue()
        self.record_process = Process(target=_record_loop,
                                      args=(self.q, output, self.monitor,
                                            self.frame_rate))
        self.record_process.start()
        return self

    def __exit__(self, *args):
        self.q.put('die')
        self.record_process.join()
        cv2.destroyAllWindows()
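
WindowRecorder is used as a context manager: recording starts in __enter__ and the recording child process receives the 'die' message in __exit__. A hedged usage sketch; the window name and save directory below are assumptions:

import time

# assumes WindowRecorder (defined above) is importable and an X11 window titled "Figure 1" exists
with WindowRecorder(window_names=["Figure 1"], frame_rate=30.0,
                    name_suffix="demo", save_dir="/tmp"):
    time.sleep(5)   # whatever the window shows during these 5 seconds ends up in the .mp4
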
Example #37
0
def job(n: int, results: SimpleQueue) -> None:  # <6>
    results.put((n, check(n)))  # <7>
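
A job like this is typically driven by one Process per input, all reporting into a shared SimpleQueue. A hedged reconstruction of that driver; check() and the inputs below are placeholders, not from the original program:

from multiprocessing import Process, SimpleQueue

def check(n: int) -> bool:
    return n % 2 == 0                 # stand-in for the real (expensive) predicate

def job(n: int, results: SimpleQueue) -> None:
    results.put((n, check(n)))

if __name__ == "__main__":
    numbers = [3, 4, 7, 8]
    results = SimpleQueue()
    workers = [Process(target=job, args=(n, results)) for n in numbers]
    for w in workers:
        w.start()
    for _ in numbers:
        print(results.get())          # (n, check(n)) pairs, in completion order
    for w in workers:
        w.join()
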
Example #38
0
def main():

    default_params = {
        "dil": True,
        "dil_iter": 5,
        "dil_k_sz": 3,
        "dil_type": 0,
        "eql_h": False,
        "k_sz": (16, 16),
        "min_c_sz": 900,
        "sub_lr": -1,
        "thresh": 109
    }
    param_dict_1 = {}
    corners = []
    detection_params = {}
    skip_config = False
    start_pause = False

    if len(sys.argv) > 1 and len(sys.argv[1]) > 0:
        for p in sys.argv[1].split(";"):
            k, v = p.split("=")
            param_dict_1[k] = v
    if "corners" in param_dict_1:
        for p in param_dict_1["corners"].split(","):
            corner_x, corner_y = p.split(":")
            corners.append((int(corner_x), int(corner_y)))
    if "dil" in param_dict_1:
        detection_params["dil"] = param_dict_1["dil"] in ("True", )
    else:
        detection_params["dil"] = default_params["dil"]
    detection_params["dil_iter"] = int(
        param_dict_1.get("dil_iter", default_params["dil_iter"]))
    detection_params["dil_k_sz"] = int(
        param_dict_1.get("dil_k_sz", default_params["dil_k_sz"]))
    detection_params["dil_type"] = int(
        param_dict_1.get("dil_type", default_params["dil_type"]))
    if "eql_h" in param_dict_1:
        detection_params["eql_h"] = param_dict_1["eql_h"] in ("True", )
    else:
        detection_params["eql_h"] = default_params["eql_h"]
    if "k_sz" in param_dict_1:
        detection_params["k_sz"] = (int(param_dict_1["k_sz"]),
                                    int(param_dict_1["k_sz"]))
    else:
        detection_params["k_sz"] = default_params["k_sz"]
    detection_params["min_c_sz"] = int(
        param_dict_1.get("min_c_sz", default_params["min_c_sz"]))
    detection_params["sub_lr"] = int(
        param_dict_1.get("sub_lr", default_params["sub_lr"]))
    detection_params["thresh"] = int(
        param_dict_1.get("thresh", default_params["thresh"]))
    if "skip_config" in param_dict_1:
        skip_config = param_dict_1["skip_config"] in ("True", )
    if "start_pause" in param_dict_1:
        start_pause = param_dict_1["start_pause"] in ("True", )
    srv_addr = param_dict_1.get("srv_addr", SRV_ADDR)
    srv_port = int(param_dict_1.get("srv_port", SRV_PORT))

    cap = cv2.VideoCapture(CAM_ID + CAM_DRIVER)
    if not cap.isOpened():
        print("Err 1")
        exit(1)

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_W)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_H)

    v_size = (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
              int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3)
    cap_s = (cap, v_size)

    if not skip_config:
        while True:
            if len(corners) == 0:
                corners = prepare_cam(cap_s)
            t_mat, t_size = get_t(corners)
            if check_cam(cap_s, t_mat, t_size):
                break
            corners.clear()

        if DEBUG:
            t_mat = np.array(
                [[1.01856022e+00, -3.91099919e-02, -1.25259952e+03],
                 [6.21716294e-03, 1.00510801e+00, -6.94297526e+02],
                 [1.54955299e-05, -2.16240961e-05, 1.00000000e+00]],
                dtype=np.float32)
            t_size = (608, 970, 3)

        if ALGO == "MOG2":
            back_sub = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
        else:
            back_sub = cv2.createBackgroundSubtractorKNN(history=1000,
                                                         detectShadows=False)

        while True:
            get_params_thresh_blur(detection_params, cap_s, t_mat, t_size,
                                   back_sub)
            if check_params(cap_s, t_mat, t_size, detection_params, back_sub):
                break

        get_params_min_cont_sz(cap_s, t_mat, t_size, detection_params,
                               back_sub)
    else:
        t_mat, t_size = get_t(corners)
        back_sub = cv2.createBackgroundSubtractorKNN(history=1000,
                                                     detectShadows=False)

    results_q = SimpleQueue()
    server_process = Process(target=send_results,
                             args=(results_q, srv_addr, srv_port))
    server_process.start()

    detect(cap_s, t_mat, t_size, detection_params, back_sub, results_q,
           start_pause)

    results_q.put(None)
    cap.release()
    server_process.join()

    print(
        "corners={}:{},{}:{},{}:{},{}:{};dil={dil};dil_iter={dil_iter};dil_k_sz={dil_k_sz};dil_type={dil_type};eql_h={eql_h};k_sz={k_sz_first};min_c_sz={min_c_sz};sub_lr={sub_lr};thresh={thresh};srv_addr={srv_addr_x};srv_port={srv_port_x};skip_config={skip_config_x};start_pause={start_pause_x}"
        .format(corners[0][0],
                corners[0][1],
                corners[1][0],
                corners[1][1],
                corners[2][0],
                corners[2][1],
                corners[3][0],
                corners[3][1],
                **detection_params,
                k_sz_first=detection_params["k_sz"][0],
                srv_addr_x=srv_addr,
                srv_port_x=srv_port,
                skip_config_x=skip_config,
                start_pause_x=start_pause))
Example #39
0
worker.start()
'''Start a background Thread that runs a loop to check run statistics and print them.
A thread is used (rather than a process) because it needs to watch global variables like linesTested.'''
worker = Thread(target=checkStats)
worker.daemon = True
worker.start()

##########################################################

headerLine = "\t".join(args.headers) if args.headers else None

if args.windType == "cat":
    window = genomics.parseGenoFile(genoFile,
                                    headerLine=headerLine,
                                    names=sampleData.indNames)
    windowQueue.put((windowsQueued, window))
    windowsQueued += 1

else:
    #get windows and analyse
    if args.windType == "coordinate":
        windowGenerator = genomics.slidingCoordWindows(
            genoFile,
            windSize,
            stepSize,
            headerLine=headerLine,
            names=sampleData.indNames,
            include=scafsToInclude,
            exclude=scafsToExclude)
    elif args.windType == "sites":
        windowGenerator = genomics.slidingSitesWindows(
Example #40
0
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # hi = [1.7, 12, 300, 12, 90]
    # lo = [-1.7, 0, -300, 0, -10]
    # observation = np.random.random(5)
    # for i in range(observation.size):
    #     observation[i] = observation[i] * (hi[i] - lo[i]) + lo[i]
    # observation = list(observation)

    store_data = []

    try:
        for _ in range(5000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            main2gvf.put(gvf_data)

            # record data for later
            store_data.append(gvf_data)

            # send action to servos
            instructions = [goal_instruction(a) for a in action]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
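The servo loop feeds (action, action_prob, obs, active_pts) tuples into main2gvf, which a learner running in a separate process drains. A minimal, self-contained sketch of that wiring (an asyncio producer writing to a multiprocessing.SimpleQueue that a separate process consumes), with placeholder data instead of the real servo and GVF code:

import asyncio
import multiprocessing as mp


async def producer_loop(main2gvf: mp.SimpleQueue, steps: int = 5):
    # Stand-in for servo_loop: pretend to read observations and push them to the learner
    for t in range(steps):
        await asyncio.sleep(0.01)            # placeholder for awaiting the serial port
        main2gvf.put((t, [0.0, 1.0, 2.0]))   # placeholder for (action, obs, features)
    main2gvf.put(None)                       # sentinel: producer is done


def learner_loop(main2gvf: mp.SimpleQueue):
    # Stand-in for learning_loop: consume items in a separate process
    while True:
        item = main2gvf.get()
        if item is None:
            break
        print('learner got', item)


if __name__ == '__main__':
    main2gvf = mp.SimpleQueue()
    learner = mp.Process(target=learner_loop, args=(main2gvf,))
    learner.start()
    asyncio.run(producer_loop(main2gvf))
    learner.join()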
Example #41
0
def import_tables(options, files_info):
    start_time = time.time()
    
    tables = dict(((x.db, x.table), x) for x in files_info) # (db, table) => table
    
    work_queue      = SimpleQueue()
    error_queue     = SimpleQueue()
    warning_queue   = SimpleQueue()
    done_event      = multiprocessing.Event()
    exit_event      = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    
    pools = []
    progressBar = None
    progressBarSleep = 0.2
    
    # setup KeyboardInterupt handler
    signal.signal(signal.SIGINT, lambda a, b: abort_import(pools, exit_event, interrupt_event))
    
    try:
        # - start the progress bar
        if not options.quiet:
            progressBar = multiprocessing.Process(
                target=update_progress,
                name="progress bar",
                args=(files_info, options, done_event, exit_event, progressBarSleep)
            )
            progressBar.start()
            pools.append([progressBar])
        
        # - start the writers
        writers = []
        pools.append(writers)
        for i in range(options.clients):
            writer = multiprocessing.Process(
                target=table_writer, name="table writer %d" % i,
                kwargs={
                    "tables":tables, "options":options,
                    "work_queue":work_queue, "error_queue":error_queue, "warning_queue":warning_queue,
                    "exit_event":exit_event
                }
            )
            writers.append(writer)
            writer.start()
        
        # - read the tables options.clients at a time
        readers = []
        pools.append(readers)
        filesLeft = len(files_info)
        fileIter = iter(files_info)
        while filesLeft and not exit_event.is_set():
            # add workers to fill up the readers pool
            while filesLeft and len(readers) < options.clients:
                table = next(fileIter)
                reader = multiprocessing.Process(
                    target=table_reader, name="table reader %s.%s" % (table.db, table.table),
                    kwargs={
                        "table":table, "options":options, "work_queue":work_queue,
                        "error_queue":error_queue, "warning_queue":warning_queue,
                        "exit_event":exit_event
                    }
                )
                readers.append(reader)
                reader.start()
                filesLeft -= 1
            
            # reap completed tasks
            if filesLeft:
                for reader in readers[:]:
                    if not reader.is_alive():
                        readers.remove(reader)
                    if filesLeft and len(readers) == options.clients:
                        time.sleep(.05)
        
        # - wait for the last batch of readers to complete
        while readers:
            for reader in readers[:]:
                if exit_event.is_set():
                    reader.terminate() # kill it abruptly
                reader.join(.1)
                if not reader.is_alive():
                    readers.remove(reader)
        
        # - append enough StopIterations to signal all writers
        if not exit_event.is_set():
            for _ in writers:
                work_queue.put((None, None, StopIteration()))
        
        # - wait for all of the writers
        for writer in writers[:]:
            while not interrupt_event.is_set():
                if not writer.is_alive():
                    writers.remove(writer)
                    break
                writer.join()
            # kill off the remainder
            if writer in writers:
                try:
                    writer.terminate()
                except Exception: pass
                
        # - stop the progress bar
        if progressBar:
            done_event.set()
            progressBar.join(progressBarSleep * 2)
            if not interrupt_event.is_set():
                utils_common.print_progress(1, indent=2)
            if progressBar.is_alive():
                progressBar.terminate()
        
        # - drain the error_queue
        errors = []
        while not error_queue.empty():
            errors.append(error_queue.get())
        
        # - if successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set() and not options.quiet:
            utils_common.print_progress(1.0, indent=2)
        
        plural = lambda num, text: "%d %s%s" % (num, text, "" if num == 1 else "s")
        if not options.quiet:
            # Continue past the progress output line
            print("\n  %s imported to %s in %.2f secs" % (plural(sum(x.rows_written for x in files_info), "row"), plural(len(files_info), "table"), time.time() - start_time))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    
    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")
    
    if len(errors) != 0:
        for error in errors:
            print("%s" % error.message, file=sys.stderr)
            if options.debug and error.traceback:
                print("  Traceback:\n%s" % error.traceback, file=sys.stderr)
            if len(error.file) == 4:
                print("  In file: %s" % error.file, file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
    
    if not warning_queue.empty():
        while not warning_queue.empty():
            warning = warning_queue.get()
            print("%s" % warning[1], file=sys.stderr)
            if options.debug:
                print("%s traceback: %s" % (warning[0].__name__, warning[2]), file=sys.stderr)
            if len(warning) == 4:
                print("In file: %s" % warning[3], file=sys.stderr)
        raise RuntimeError("Warnings occurred during import")
Example #42
0
File: config.py Project: Yelp/pidtree-bcc
def _forward_config_change(queue: SimpleQueue, config_data: dict):
    queue.put(config_data)
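_forward_config_change only relays a config dict into a SimpleQueue. A hypothetical sketch of how such a forwarder could fan one configuration update out to several worker processes (the watcher function and the config contents are made up for illustration, not taken from pidtree-bcc):

from multiprocessing import Process, SimpleQueue


def _forward_config_change(queue: SimpleQueue, config_data: dict):
    queue.put(config_data)


def watcher(queue: SimpleQueue):
    # Each worker blocks until a new configuration arrives, then applies it
    config = queue.get()
    print('got new config:', config)


if __name__ == '__main__':
    queues = [SimpleQueue() for _ in range(2)]
    workers = [Process(target=watcher, args=(q,)) for q in queues]
    for w in workers:
        w.start()

    # Fan the same (made-up) config change out to every per-worker queue
    new_config = {'interval': 30, 'filters': ['10.0.0.0/8']}
    for q in queues:
        _forward_config_change(q, new_config)

    for w in workers:
        w.join()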
Example #43
0
class SafeQueue(object):
    """
    Many writers Single Reader multiprocessing safe Queue
    """
    __thread_pool = SingletonThreadPool()

    def __init__(self, *args, **kwargs):
        self._reader_thread = None
        self._q = SimpleQueue(*args, **kwargs)
        # Fix the simple queue write so it uses a single OS write, making it atomic message passing
        # noinspection PyBroadException
        try:
            self._q._writer._send_bytes = partial(
                SafeQueue._pipe_override_send_bytes, self._q._writer)
        except Exception:
            pass
        self._internal_q = None
        self._q_size = 0

    def empty(self):
        return self._q.empty() and (not self._internal_q
                                    or self._internal_q.empty())

    def is_pending(self):
        # check if we have pending requests to be pushed (it does not mean they were pulled)
        # only call from main put process
        return self._q_size > 0

    def close(self, event):
        # wait until all pending requests pushed
        while self.is_pending():
            if event:
                event.set()
            sleep(0.1)

    def get(self, *args, **kwargs):
        return self._get_internal_queue(*args, **kwargs)

    def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
        buffer = []
        timeout_count = int(timeout / throttle_sleep)
        empty_count = timeout_count
        while len(buffer) < max_items:
            while not self.empty() and len(buffer) < max_items:
                try:
                    buffer.append(self._get_internal_queue(block=False))
                    empty_count = 0
                except Empty:
                    break
            empty_count += 1
            if empty_count > timeout_count or len(buffer) >= max_items:
                break
            sleep(throttle_sleep)
        return buffer

    def put(self, obj):
        # GIL will make sure it is atomic
        self._q_size += 1
        # make sure the blocking put is done in the thread pool, i.e. in the background
        obj = pickle.dumps(obj)
        self.__thread_pool.get().apply_async(self._q_put, args=(obj, ))

    def _q_put(self, obj):
        self._q.put(obj)
        # GIL will make sure it is atomic
        self._q_size -= 1

    def _get_internal_queue(self, *args, **kwargs):
        if not self._internal_q:
            self._internal_q = TrQueue()
        if not self._reader_thread:
            self._reader_thread = Thread(target=self._reader_daemon)
            self._reader_thread.daemon = True
            self._reader_thread.start()
        obj = self._internal_q.get(*args, **kwargs)
        # deserialize
        return pickle.loads(obj)

    def _reader_daemon(self):
        # pull from process queue and push into thread queue
        while True:
            # noinspection PyBroadException
            try:
                obj = self._q.get()
                if obj is None:
                    break
            except Exception:
                break
            self._internal_q.put(obj)

    @staticmethod
    def _pipe_override_send_bytes(self, buf):
        n = len(buf)
        # For wire compatibility with 3.2 and lower
        header = struct.pack("!i", n)
        # Issue #20540: concatenate before sending, to avoid delays due
        # to Nagle's algorithm on a TCP socket.
        # Also note we want to avoid sending a 0-length buffer separately,
        # to avoid "broken pipe" errors if the other end closed the pipe.
        self._send(header + buf)
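SafeQueue relies on helpers that are not in this excerpt (SingletonThreadPool, TrQueue). The pattern it wraps, many writer processes pushing into one SimpleQueue that a single background reader thread drains into a regular queue.Queue, can be sketched with the standard library alone (illustrative only, not the original implementation):

import queue
import threading
from multiprocessing import Process, SimpleQueue


def writer(q: SimpleQueue, wid: int):
    # Many writer processes may put() concurrently; SimpleQueue handles the locking
    for i in range(3):
        q.put((wid, i))


def reader_daemon(q: SimpleQueue, internal: queue.Queue):
    # Single reader: pull from the process queue and hand off to a thread queue
    while True:
        obj = q.get()
        if obj is None:   # sentinel stops the reader
            break
        internal.put(obj)


if __name__ == '__main__':
    q = SimpleQueue()
    internal = queue.Queue()
    reader = threading.Thread(target=reader_daemon, args=(q, internal), daemon=True)
    reader.start()

    writers = [Process(target=writer, args=(q, wid)) for wid in range(4)]
    for w in writers:
        w.start()
    for w in writers:
        w.join()

    q.put(None)
    reader.join()

    while not internal.empty():
        print(internal.get())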
Example #44
0
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [g.data(xi, obs, action, xpi, obsp)
                 for gs, xi, xpi in zip(gvfs, x, xp)
                 for g in gs]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #45
0
        lst = list()
        process_list = list()
        que = SimpleQueue()
        pool = Pool(processes=POOL_NUM)

        page_lst = get_image_page(HTML_PAGE % HTML_NUM)
        page_num = len(page_lst)
        print("Get image page %d, analyzing..." % page_num)

        for url in page_lst:
            res = pool.apply_async(get_image_link, (url, ))
            res.wait()
            image_url = res.get()
            if image_url is None:
                continue
            que.put(image_url)
            pro = Process(target=create_image, args=(que, directory, COUNTER))
            process_list.append(pro)
            pro.start()

        pool.close()
        pool.join()

        for pro in process_list:
            pro.join()

        print("\nUse %8s .  Total %d .  AT %s  .\a\a" %
              (time.time() - start_time, COUNTER.value, directory))
        if page_num != COUNTER.value:
            print("Warning: Not each page's image had got. %s\a\a" %
                  (HTML_PAGE % HTML_NUM))
Example #46
0
File: base.py Project: LibrERP/Pylibs
class AbstractWorker(abc.ABC, Process):

    # NOTE: TASK_NAME must be overridden by derived classes
    TASK_NAME = None

    def __init__(self,
                 name: str,
                 connection_params: ConnectionParams,
                 tasks: Sized,
                 q: Queue,
                 max_retry: int = 4):
        super().__init__(name=name)

        self._connection_params = connection_params
        self._queue = q
        self._tasks = tasks
        # Number of items downloaded: equal to the list index of the last downloaded item plus 1.
        self._fetched_items = 0
        self._iterations_counter = 0
        self._max_retry = max_retry
        self._av2000 = None
        self._navigator = None
        self._exception_queue = SimpleQueue()

    # end __init__

    def run(self):

        while self._max_retry_ok(
        ) and not self._error() and not self._task_completed():

            # Open connection to av2000
            self._av2000 = AV2000Driver(self._connection_params)
            self._navigator = Navigator(self._av2000)

            # Notify task starting
            print(f'{self.name} Running task {self.TASK_NAME}', flush=True)

            try:
                self._task_loop()

            except LoadingTimeout as lte:
                # Increase the iterations counter
                print(f'{self.name} ERROR: LoadingTimeout', flush=True)
                self._iterations_counter = self._iterations_counter + 1

                _logger.error(' - ' * 10 + 'TERMINAL DUMP - BEGIN' +
                              ' - ' * 10)
                for dline in lte.av2000_driver.display_lines:
                    _logger.error(dline)
                # end for
                _logger.error(' - ' * 10 + 'TERMINAL DUMP - END  ' +
                              ' - ' * 10)

            except Exception as e:
                # Unexpected error
                print(f'{self.name} ERROR', flush=True)
                print(e, flush=True)
                self._exception_queue.put(e)

            finally:
                # Always close the connection when exiting the loop
                print(f'{self.name} finally', flush=True)
                self._navigator.exit()

            # end try / except

        # end while

        # Loop completed, let's check what happened
        if self._task_completed():
            sys.exit(0)
        elif self._error():
            sys.exit(1)
        else:
            sys.exit(2)
        # end if

    # end run

    def _max_retry_ok(self):
        return self._iterations_counter < self._max_retry

    # end _iterations_ok

    def _task_completed(self):
        return len(self._tasks) == self._fetched_items

    # end task completed

    def _error(self):
        return not self._exception_queue.empty()

    # end _error

    @abc.abstractmethod
    def _task_loop(self):
        pass
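AbstractWorker depends on AV2000-specific classes (ConnectionParams, AV2000Driver, Navigator) that are not shown. The process pattern it implements, retry a task up to max_retry times, push unexpected exceptions into a SimpleQueue, and encode the outcome in the exit status, can be sketched in a simplified, self-contained form (illustrative only):

import sys
from multiprocessing import Process, SimpleQueue


class RetryingWorker(Process):
    # Simplified stand-in for the pattern above: retry on transient failures,
    # push unexpected exceptions into a SimpleQueue, signal the outcome via exit code
    def __init__(self, name: str, max_retry: int = 4):
        super().__init__(name=name)
        self._max_retry = max_retry
        self._attempts = 0
        self._done = False
        self._exception_queue = SimpleQueue()

    def run(self):
        while self._attempts < self._max_retry and self._exception_queue.empty() and not self._done:
            try:
                self._task_loop()
                self._done = True
            except TimeoutError:
                self._attempts += 1              # transient error: retry
            except Exception as e:
                self._exception_queue.put(e)     # unexpected error: give up

        if self._done:
            sys.exit(0)      # task completed
        elif not self._exception_queue.empty():
            sys.exit(1)      # unexpected error
        else:
            sys.exit(2)      # retries exhausted

    def _task_loop(self):
        print(f'{self.name}: running task')


if __name__ == '__main__':
    w = RetryingWorker('worker-0')
    w.start()
    w.join()
    print('exit code:', w.exitcode)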
Example #47
0
async def servo_loop(device: str,
                     sids: Sequence[int],
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy,
                     coder: KanervaCoder,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        # await send_msg(sr, sw, sid, [])
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    # read_data = [0x02,  # read
    #              0x24,  # starting from 0x24
    #              0x08]  # a string of 8 bytes

    read_all = [0x02,  # read
                0x00,  # starting from the beginning
                0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_all) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], list(action))

            # make feature vector
            active_pts = coder(obs=obs, byte_data=byte_data)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs, x=active_pts)

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            if locks:
                print('main gm a 1 a')
                gmlock.acquire()
                print('main gm a 1 b')
            main2gvf.put(gvf_data)
            if locks:
                print('main gm r 1 a')
                gmlock.release()
                print('main gm r 1 b')

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Example #48
0
########################################################################################################################

#place lines into pods
#pass pods on to processor(s)
podSize = args.podSize

pod = []
podNumber = 0

for line in In:
    linesRead += 1
    pod.append((linesRead, line))
    
    if linesRead % podSize == 0:
        inQueue.put((podNumber,pod))
        if verbose:
            sys.stderr.write("Pod {} sent for analysis...\n".format(podNumber))
        podNumber += 1
        podsQueued += 1
        pod = []


#run remaining lines in pod

if len(pod) > 0:
    inQueue.put((podNumber,pod))
    podsQueued += 1
    if verbose:
        sys.stderr.write("Pod {} sent for analysis...\n".format(podNumber))
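The consumer side of this pod queue is not part of the excerpt. A hypothetical sketch of a worker process draining inQueue until a None sentinel arrives (the per-line analysis below is a placeholder):

from multiprocessing import Process, SimpleQueue


def pod_worker(in_queue: SimpleQueue, result_queue: SimpleQueue):
    # Pull (podNumber, pod) tuples until the None sentinel, analyse each line
    while True:
        item = in_queue.get()
        if item is None:
            break
        pod_number, pod = item
        analysed = [(line_number, len(line)) for line_number, line in pod]
        result_queue.put((pod_number, analysed))


if __name__ == '__main__':
    in_queue, result_queue = SimpleQueue(), SimpleQueue()
    worker = Process(target=pod_worker, args=(in_queue, result_queue))
    worker.start()

    in_queue.put((0, [(1, 'chr1\t100\tA'), (2, 'chr1\t200\tT')]))
    in_queue.put(None)   # sentinel: no more pods
    worker.join()

    while not result_queue.empty():
        print(result_queue.get())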
Example #49
0
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)

        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                target=_queue_management_worker,
                args=(weakref.ref(self, weakref_cb),
                      self._processes,
                      self._pending_work_items,
                      self._work_ids,
                      self._call_queue,
                      self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                target=_process_worker,
                args=(self._call_queue,
                      self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                                        'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f

    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None

    shutdown.__doc__ = _base.Executor.shutdown.__doc__
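The class above mirrors the standard library's concurrent.futures.ProcessPoolExecutor. Typical use of that public API, independent of the internal call and result queues shown here, looks like this:

from concurrent.futures import ProcessPoolExecutor, as_completed


def square(x):
    return x * x


if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=4) as executor:
        # submit() returns one Future per call
        futures = [executor.submit(square, n) for n in range(5)]
        print(sorted(f.result() for f in as_completed(futures)))

        # map() preserves input order and can batch work with chunksize
        print(list(executor.map(square, range(5), chunksize=2)))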
Example #50
0
class SQLiteBroker(object):
    """
    Multithread broker to query a SQLite db
    """
    def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
        self.db_file = db_file
        self.connection = sqlite3.connect(self.db_file)
        self.broker_cursor = self.connection.cursor()
        self.broker_queue = SimpleQueue()
        self.broker = None
        self.lock_wait_time = lock_wait_time
        # ToDo: Set up a process pool to limit number of query threads

    def create_table(self, table_name, fields):
        """
        Make a new table in the database
        :param table_name: What do you want your table named?
        :param fields: field names and any SQL modifiers like type or key commands
        (e.g., ['table_id INT PRIMARY KEY', 'some_data TEXT', 'price INT'])
        :type fields: list
        :return:
        """
        fields = ", ".join(fields)
        try:
            self.broker_cursor.execute("CREATE TABLE %s (%s)" % (table_name, fields))
        except sqlite3.OperationalError:
            pass
        return

    def _broker_loop(self, queue):  # The queue must be passed in explicitly because the process is being spun off
        while True:
            if not queue.empty():
                query = queue.get()
                if query['mode'] == 'sql':
                    pipe = query['pipe']
                    locked_counter = 0
                    while True:
                        try:
                            dummy_func()
                            self.broker_cursor.execute(query['sql'], query['values'])
                            self.connection.commit()
                        except sqlite3.OperationalError as err:
                            if "database is locked" in str(err):
                                # Wait for database to become free
                                if locked_counter > self.lock_wait_time * 5:
                                    print("Failed query: %s" % query['sql'])
                                    raise err
                                locked_counter += 1
                                sleep(.2)
                                continue
                            else:
                                print("Failed query: %s" % query['sql'])
                                raise err
                        break
                    response = self.broker_cursor.fetchall()
                    pipe.send(json.dumps(response))
                elif query['mode'] == 'stop':
                    break
                else:
                    raise RuntimeError("Broker instruction '%s' not understood." % query['mode'])

    def start_broker(self):
        if not self.broker:
            self.broker = Process(target=self._broker_loop, args=[self.broker_queue])
            self.broker.daemon = True
            self.broker.start()
        return

    def stop_broker(self):
        self.broker_queue.put({'mode': 'stop'})
        while self.broker.is_alive():
            pass  # Don't move on until the broker is all done doing whatever it might be doing
        return

    def query(self, sql, values=None, errors=True):
        """
        :param sql: SQL string
        :param values: If question marks are used in SQL command, pass in replacement values as tuple
        :param errors: Suppress raised errors by passing in False
        :return: 
        """
        if not self.broker:
            raise RuntimeError("Broker not running. Use the 'start_broker()' method before calling query().")

        values = () if not values else values
        recvpipe, sendpipe = Pipe(False)
        valve = br.SafetyValve(150)
        while True:
            valve.step("To many threads being called, tried for 5 minutes but couldn't find an open thread.")
            try:
                dummy_func()
                self.broker_queue.put({'mode': 'sql', 'sql': sql, 'values': values, 'pipe': sendpipe})
                break
            except (sqlite3.Error, sqlite3.OperationalError, sqlite3.IntegrityError, sqlite3.DatabaseError) as err:
                if errors:
                    raise err
            except RuntimeError as err:
                if "can't start new thread" in str(err):
                    sleep(2)
                else:
                    raise err
        response = json.loads(recvpipe.recv())
        return response

    def iterator(self, sql):  # Note that this does not run through the broker
        temp_cursor = self.connection.cursor()
        query_result = temp_cursor.execute(sql)
        while True:
            fetched = query_result.fetchone()
            if not fetched:
                break
            else:
                yield fetched

    def close(self):
        self.stop_broker()
        self.connection.close()
        return
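query() leans on helpers that are not in this excerpt (br.SafetyValve, dummy_func). The underlying pattern, a single broker process that owns the SQLite connection and serves SQL requests sent over a SimpleQueue, returning results through a Pipe, can be sketched with the standard library alone (illustrative, not the original implementation):

import json
import sqlite3
from multiprocessing import Pipe, Process, SimpleQueue


def broker_loop(db_file, broker_queue):
    # One process owns the SQLite connection; all queries funnel through the queue
    connection = sqlite3.connect(db_file)
    cursor = connection.cursor()
    while True:
        query = broker_queue.get()
        if query['mode'] == 'stop':
            break
        cursor.execute(query['sql'], query['values'])
        connection.commit()
        query['pipe'].send(json.dumps(cursor.fetchall()))
    connection.close()


if __name__ == '__main__':
    broker_queue = SimpleQueue()
    broker = Process(target=broker_loop, args=(':memory:', broker_queue), daemon=True)
    broker.start()

    recvpipe, sendpipe = Pipe(False)
    broker_queue.put({'mode': 'sql', 'sql': 'SELECT ?, ?', 'values': (1, 2), 'pipe': sendpipe})
    print(json.loads(recvpipe.recv()))   # -> [[1, 2]]

    broker_queue.put({'mode': 'stop'})
    broker.join()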
Example #51
0
File: ami.py Project: ATRAN2/Futami
class Ami:
    def __init__(self, request_queue, response_queue):
        self.request_queue = request_queue
        self.response_queue = response_queue
        self.update_request_queue = SimpleQueue()

        Process(
            target=self.update_loop,
            name='periodic api worker',
            args=(response_queue, self.update_request_queue),
        ).start()

        logger.debug("initialization complete")

        self.request_loop()

    def proxy_exception_to(instance_attribute_exception_proxy_queue):
        def _proxy_exception(f):
            """This isn't your normal-looking function.
            """
            @wraps(f)
            def wrapper(self, *args, **kwargs):
                try:
                    return f(self, *args, **kwargs)
                except BaseException as ex:
                    queue = getattr(self, instance_attribute_exception_proxy_queue)
                    tb = traceback.format_exc()
                    this_process = current_process()
                    queue.put(StoredException(tb, this_process.name))
            return wrapper
        return _proxy_exception

    # Loop to handle fast part of LoadAndFollow and other requests from IRC
    @proxy_exception_to("response_queue")
    def request_loop(self):
        # The identifier argument is an opaque
        # identifier used by the queue client in some situations.
        while True:
            request = self.request_queue.get()
            logger.debug("Got request {}".format(request))

            if request.action is Action.LoadAndFollow:
                if isinstance(request.target, BoardTarget):
                    # Download all threads
                    board = request.target.board
                    threads = self.get_board(board)

                    # Seed seen_boards so update_loop doesn't re-fetch them
                    self.update_request_queue.put(SubscriptionUpdate.make(
                        action=Action.InternalQueueUpdate,
                        target=request.target,
                        payload={thread['no']: thread['last_modified'] for thread in threads},
                    ))

                    threads.sort(key=itemgetter('last_modified'))

                    # Download all thread content so we can get the OP
                    for thread in threads:
                        posts = list(self.get_thread(board, thread['no']))
                        op = posts[0]
                        op.payload = request.payload

                        self.response_queue.put(op)

                elif isinstance(request.target, ThreadTarget):
                    posts = list(self.get_thread(
                        request.target.board,
                        request.target.thread
                    ))

                    self.update_request_queue.put(SubscriptionUpdate.make(
                        action=Action.InternalQueueUpdate,
                        target=request.target,
                        payload=posts,
                    ))

                    for post in posts:
                        post.payload = request.payload

                        self.response_queue.put(post)

    @retry
    def get_board(self, board):
        url = THREAD_LIST.format(board=board)
        pages = requests.get(url).json()
        threads = list(flatten([page['threads'] for page in pages]))
        return threads

    @retry
    def get_thread(self, board, thread):
        url = THREAD.format(board=board, thread=thread)
        posts = requests.get(url).json()['posts']

        for post in posts:
            post['board'] = board

        posts = map(Post, posts)

        return posts

    # Timed loop to hit 4chan API
    @proxy_exception_to("response_queue")
    def update_loop(self, response_queue, update_request_queue):
        # Set of boards that are watched
        watched_boards = set()
        # Dictionary of board => set of threads(string) that are watched
        watched_threads = defaultdict(set)

        # Dictionary of board => {thread_no => last_modified} last seen on board
        seen_boards = defaultdict(dict)
        # Dictionary of board, thread => posts last seen on thread
        seen_threads = defaultdict(lambda: defaultdict(list))

        while True:
            # Process pending update requests
            while not update_request_queue.empty():
                request = update_request_queue.get()
                if request.action is Action.InternalQueueUpdate:
                    if isinstance(request.target, BoardTarget):
                        watched_boards.add(request.target.board)
                        seen_boards[request.target.board] = request.payload
                    elif isinstance(request.target, ThreadTarget):
                        # assert request.target.board in watched_boards, "Asked to watch a thread of a board not currently being watched"
                        watched_threads[request.target.board].add(request.target.thread)
                        seen_threads[request.target.board][request.target.thread] = request.payload

            # Fetch pending boards
            pending_boards = defaultdict(dict)
            for board in watched_boards:
                pending_boards[board] = {
                    thread['no']:
                    thread['last_modified'] for thread in self.get_board(board)
                }

            to_delete = []
            for board, threads in pending_boards.items():
                for thread_no, last_modified in threads.items():
                    if thread_no not in seen_boards[board]:
                        thread = list(self.get_thread(board, thread_no))[0]
                        logger.debug("sending new thread {}".format(thread))
                        response_queue.put(thread)
                    elif last_modified > seen_boards[board][thread_no]:
                        thread = list(self.get_thread(board, thread_no))[0]
                        logger.debug("sending updated thread {}".format(thread))
                        response_queue.put(thread)
                    elif last_modified < seen_boards[board][thread_no]:
                        # Sometimes we get stale data immediately after reading
                        # it (tested under SLEEP_TIME = 3). Ignore this data.
                        to_delete.append((board, thread_no))

            for board, thread_no in to_delete:
                del pending_boards[board][thread_no]

            seen_boards = pending_boards

            # Fetch pending threads
            pending_threads = defaultdict(lambda: defaultdict(list))
            for board, threads in watched_threads.items():
                for thread in threads:
                    pending_threads[board][thread] = list(self.get_thread(board, thread))

            for board, threads in pending_threads.items():
                for thread_no, posts in threads.items():
                    for post in posts:
                        if post not in seen_threads[board][thread_no]:
                            logger.debug("sending new post {}".format(post))
                            response_queue.put(post)

            seen_threads = pending_threads

            sleep(SLEEP_TIME)
Example #52
0
def EncryptionTaskGenerationProcess(
        chunkEncryptionQueue: multiprocessing.SimpleQueue,
        blobRemoveQueue: multiprocessing.SimpleQueue,
        databaseUpdateQueue: multiprocessing.SimpleQueue,
        databaseFileName: str, path: str) -> None:
    def isFileChanged(database: Database, path: str) -> bool:
        # File is "changed" if it is not in database
        fileInfo = database.getFile(path)
        if fileInfo is None:
            return True

        # Consider file is not changed if modification time is the same
        stat = os.stat(path)
        if fileInfo.stats['mtime'] == stat.st_mtime:
            return False

        # File is changed if file size is different
        if fileInfo.decryptedSize != stat.st_size:
            return True

        # Check the actual content of the file by reading it in chunk by chunk
        fp = open(path, 'rb')
        for index, chunk in enumerate(
                iter(lambda: fp.read(1024 * 1024 * 32), bytes(0))):
            blobInfo = database.getBlob(fileInfo.blobIds[index])
            assert blobInfo is not None

            # File is changed if chunk hash is different
            if blobInfo.decryptedSha256 != hashlib.sha256(chunk).digest():
                return True

        # Otherwise the file is definitely not changed
        return False

    # Set process title
    setproctitle.setproctitle('EncryptionTaskGenerationProcess')

    # Get a transient database
    database = Database.getTransientCopy(databaseFileName)

    # Convert to absolute path
    path = os.path.abspath(path)

    # Walk over all files under path
    # Use path itself if it is a file
    for dirPath, _, fileNames in [('', None, [path])
                                  ] if os.path.isfile(path) else os.walk(path):
        # Update process title
        setproctitle.setproctitle(
            'EncryptionTaskGenerationProcess {}, {} files'.format(
                dirPath, len(fileNames)))

        fileNames.sort()

        for fileName in fileNames:
            filePath = os.path.join(dirPath, fileName)

            # If file is not changed, then only update file modification time
            if not isFileChanged(database, filePath):
                # If mtime is different, send update mtime command to queue
                if database.getFile(filePath).stats['mtime'] != os.stat(
                        filePath).st_mtime:
                    databaseUpdateQueue.put(['UpdateMtime', filePath])
                continue

            # Otherwise backup the file
            # If file is present in database, remove existing blobs
            fileInfo = database.getFile(filePath)
            if fileInfo is not None:
                for blobId in fileInfo.blobIds:
                    # Send blob name to blob remove process
                    blobInfo = database.getBlob(blobId)
                    blobRemoveQueue.put([blobInfo.name])

                    # Send remove blob command to queue
                    databaseUpdateQueue.put(['RemoveBlob', blobId])

            # Send chunk info to chunk encryption process
            fileSize = os.stat(filePath).st_size
            for offset in range(0, fileSize, 1024 * 1024 * 32):
                chunkEncryptionQueue.put([filePath, offset, fileSize])

    # Close transient database
    database.close()
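The queues here connect this producer to encryption and database-update processes that are not shown. A minimal, hypothetical sketch of that producer/consumer wiring (the chunk size and the "encryption" below are placeholders, not the original implementation):

from multiprocessing import Process, SimpleQueue

CHUNK_SIZE = 4  # tiny demo chunk; the code above reads files in 32 MiB chunks


def task_generation(chunk_queue: SimpleQueue, data: bytes):
    # Producer: split the payload into fixed-size chunks and queue them for "encryption"
    for offset in range(0, len(data), CHUNK_SIZE):
        chunk_queue.put((offset, data[offset:offset + CHUNK_SIZE]))
    chunk_queue.put(None)                 # sentinel: no more chunks


def chunk_encryption(chunk_queue: SimpleQueue, update_queue: SimpleQueue):
    # Consumer: "encrypt" each chunk (placeholder transform) and report downstream
    while True:
        item = chunk_queue.get()
        if item is None:
            break
        offset, chunk = item
        update_queue.put(['AddBlob', offset, chunk[::-1]])
    update_queue.put(None)


if __name__ == '__main__':
    chunk_queue, update_queue = SimpleQueue(), SimpleQueue()
    producer = Process(target=task_generation, args=(chunk_queue, b'hello world!'))
    consumer = Process(target=chunk_encryption, args=(chunk_queue, update_queue))
    producer.start()
    consumer.start()

    while True:
        msg = update_queue.get()
        if msg is None:
            break
        print(msg)

    producer.join()
    consumer.join()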