Example #1
    def get_novel_dict(self, chapter_url_list: list) -> dict:
        result = self.downloader.get_result(chapter_url_list)
        result.show_time_cost()
        result.show_urls_status()
        print(" 重试失败章节 ".center(shutil.get_terminal_size().columns - 7, '*'))
        result.retry_failed_urls()
        result.show_urls_status()

        print(" 分离章节内容 ".center(shutil.get_terminal_size().columns - 7, '*'))
        process_number = self.downloader.config.get_config(
            "multi", "process_number")
        if process_number > 2:
            process_number = int(process_number // 1.5)
        queue = SimpleQueue()
        for i in range(process_number):
            Process(target=self.fill_novel_dict,
                    args=(chapter_url_list[i::process_number],
                          result.get_urls_detail_dict(), queue)).start()
        for i in tqdm(range(len(chapter_url_list)),
                      total=len(chapter_url_list),
                      desc="分离章节内容",
                      unit="章节",
                      postfix={"process": process_number}):
            queue.get()

        return result.get_urls_detail_dict()
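
A minimal, self-contained sketch of the fan-out pattern above, assuming the queue is used purely as a completion counter: each worker puts one token per finished item, and the parent drains exactly len(items) tokens to drive the tqdm bar. parse_chapter is a hypothetical stand-in for fill_novel_dict.

from multiprocessing import Process, SimpleQueue
from tqdm import tqdm

def parse_chapter(chunk, queue):
    for item in chunk:
        _ = item.upper()   # placeholder for the real parsing work
        queue.put(None)    # one token per completed item

if __name__ == "__main__":
    items = ["chapter-{}".format(i) for i in range(100)]
    process_number = 4
    queue = SimpleQueue()
    for i in range(process_number):
        # striped slicing hands every n-th item to the same worker
        Process(target=parse_chapter,
                args=(items[i::process_number], queue)).start()
    for _ in tqdm(range(len(items)), desc="parsing", unit="item"):
        queue.get()

Note that a plain dict passed to a child process is copied, so whatever fill_novel_dict extracts has to travel back through IPC (or a Manager dict) rather than through the dict argument itself.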
Example #2
    def merge_db(db_folder, new_db_name, db_to_merge):

        assert path.exists(
            db_folder
        ), '`{}` is not a valid path to the db folder, please correct it.'.format(
            db_folder)

        shutdown = Event()
        writer_queue = SimpleQueue()

        writer = Writer(db_folder=db_folder,
                        db_name=new_db_name,
                        queue=writer_queue,
                        shutdown=shutdown)
        reader = Reader(db_folder=db_folder,
                        db_to_merge=db_to_merge,
                        queue=writer_queue,
                        shutdown=shutdown)

        reader.start()
        writer.start()

        pbar = tqdm(total=len(db_to_merge))

        c = 0
        while not shutdown.is_set():
            try:
                new_c = writer.counter.value
                progress = new_c - c
                if progress > 0:
                    pbar.update(progress)
                    c = new_c
                Event().wait(2)

            except KeyboardInterrupt:
                print()
                print("Main thread grab the keyboard interrupt")
                break

        shutdown.set()
        pbar.close()

        if writer.is_alive():

            print("Waiting writer...")
            writer.join()

        print("WRITER EXECUTED.")

        if reader.is_alive():
            writer_queue.get()
            print("Waiting reader...")
            reader.join()

        print("READER EXECUTED.")

        print()
        print("Done.")
Example #3
class Communicator:
    def __init__(self):
        self.transmit_queue = SimpleQueue()
        self.operations_queue = SimpleQueue()
        self.client_queue = SimpleQueue()
        self.emitter_queue = SimpleQueue()

    def read_operations(self):
        return self.operations_queue.get()

    def add_operation(self, operation):
        self.operations_queue.put([NEW_DATA, IN_PROCESS, operation.data()])

    def finish_operation(self, operation):
        self.operations_queue.put([NEW_DATA, FINISHED, operation.data()])

    def finish_task(self, operation, task):
        self.operations_queue.put(
            [NEW_DATA, TASK_FINISHED,
             operation.data(),
             task.data()])

    def sent_task(self, operation, task):
        self.operations_queue.put(
            [NEW_DATA, TASK_SENT,
             operation.data(),
             task.data()])

    def stop_operations(self):
        self.operations_queue.put([STOP])

    def read_transmit(self):
        return self.transmit_queue.get()

    def stop_transmit(self):
        self.transmit_queue.put([STOP])

    def notify_end_task(self, operation, task):
        self.transmit_queue.put([NEW_DATA, operation.data(), task.data()])

    def get_current_credits(self):
        self.operations_queue.put([NEW_DATA, CREDITS])

        return self.client_queue.get()

    def notify_current_credits(self, credits_):
        self.client_queue.put(credits_)

    def read_emitter(self):
        return self.emitter_queue.get()

    def emit(self, to, data, callback=None):
        self.emitter_queue.put([NEW_DATA, to, data, callback])

    def stop_emitter(self):
        self.emitter_queue.put([STOP])
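
A hedged sketch of the consumer side of the operations_queue protocol above. Only the message shapes ([STOP] and [NEW_DATA, status, ...]) come from the class itself; the constant values and the replies are illustrative assumptions.

NEW_DATA, STOP = 0, 1                       # assumed values
IN_PROCESS, FINISHED, TASK_FINISHED, TASK_SENT, CREDITS = range(2, 7)

def operations_consumer(comm):
    while True:
        message = comm.read_operations()    # blocks on SimpleQueue.get()
        if message[0] == STOP:
            break
        kind = message[1]
        if kind == IN_PROCESS:
            print("operation started:", message[2])
        elif kind == FINISHED:
            print("operation finished:", message[2])
        elif kind in (TASK_FINISHED, TASK_SENT):
            print("task event:", message[2], message[3])
        elif kind == CREDITS:
            comm.notify_current_credits(0)  # unblocks get_current_credits()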
Example #4
def merge_db(db_folder, new_db_name, db_to_merge):

    assert path.exists(db_folder), '`{}` is not a valid path to the db folder, please correct it.'.format(db_folder)

    shutdown = Event()
    writer_queue = SimpleQueue()

    writer = Writer(db_folder=db_folder, db_name=new_db_name, queue=writer_queue, shutdown=shutdown)
    reader = Reader(db_folder=db_folder, db_to_merge=db_to_merge,
                    queue=writer_queue, shutdown=shutdown)

    reader.start()
    writer.start()

    pbar = tqdm(total=len(db_to_merge))

    c = 0
    while not shutdown.is_set():
        try:
            new_c = writer.counter.value
            progress = new_c - c
            if progress > 0:
                pbar.update(progress)
                c = new_c
            Event().wait(2)

        except KeyboardInterrupt:
            print()
            print("Main thread grab the keyboard interrupt")
            break

    shutdown.set()
    pbar.close()
    # writer.join()
    # reader.join()

    print("writer alive", writer.is_alive())
    print("reader alive", reader.is_alive())

    if writer.is_alive():

        print("Waiting writer...")
        writer.join()

    print("WRITER EXECUTED")

    if reader.is_alive():
        print("Waiting reader...")
        writer_queue.get()
        print("Waiting reader 2...")
        reader.join()

    print("READER EXECUTED")

    print("Done.")
Example #5
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #6
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #7
    def start(self):
        t1 = time.time()
        downloader = Downloader(
            Config("hardworking_av_studio.ini", HARDWORKING_CONFIG_DICT,
                   HARDWORKING_CONFIG_SCHEMA))
        urls = ['https://www.dmmsee.zone/studio/0']
        urls.extend([
            'https://www.dmmsee.zone/studio/{}{}'.format(i, word)
            for i in range(1, 40) for word in ' ' + string.ascii_lowercase
        ])
        urls.extend([
            'https://www.dmmsee.zone/studio/{}'.format(i)
            for i in range(40, 400)
        ])
        print(" config ".center(shutil.get_terminal_size().columns - 1, '*'))
        downloader.config.list_config()
        print(" download urls ".center(shutil.get_terminal_size().columns - 1,
                                       '*'))
        self.result = downloader.get_result(urls)
        self.result.show_time_cost()
        self.result.show_urls_status()
        print(" retry failed urls ".center(
            shutil.get_terminal_size().columns - 1, '*'))
        self.result.retry_failed_urls()
        self.result.show_urls_status()

        if os.path.exists("hardworking_av_studio.txt"):
            os.remove("hardworking_av_studio.txt")

        print(" analyzing result ".center(
            shutil.get_terminal_size().columns - 1, '*'))
        tmp_time = time.time()
        analyzing_result_process_number = downloader.config.get_config(
            'multi', 'analyzing_result_process_number')
        queue = SimpleQueue()
        for i in range(analyzing_result_process_number):
            Process(target=self.screen_by_mini_date,
                    args=(self.result.get_finished_urls()
                          [i::analyzing_result_process_number],
                          queue)).start()
        for i in tqdm(range(len(self.result.get_finished_urls())),
                      total=len(self.result.get_finished_urls()),
                      desc="analyzing result",
                      unit="result",
                      postfix={"process": analyzing_result_process_number}):
            queue.get()
        print("\nanalysis completed... time cost {:.2f}s".format(time.time() -
                                                                 tmp_time))
        print(" result ".center(shutil.get_terminal_size().columns, '*'))
        print("The result has been written to the current folder:",
              os.path.join(os.getcwd(), "hardworking_av_studio.txt"))
        print("total time cost {:.2f}s".format(time.time() - t1))
        return True
Example #8
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(
                mp.Process(target=_parallel_fit,
                           args=(self.fit_, self.partial_fit_,
                                 self.base_estimator, self.verbose, data_queue,
                                 result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))  # resume the clusterer stored for this block
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" %
                      (blocks_computed, blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Example #9
File: blocking.py Project: MSusik/beard
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))  # resume the clusterer stored for this block
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
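
Both _fit variants above pipeline the work: the first n_jobs blocks are sent without waiting, and every block after that is only sent once a result has been collected, which bounds how many blocks are in flight at once. A minimal sketch of that backpressure pattern, with a squaring worker standing in for _parallel_fit:

import multiprocessing as mp
from multiprocessing import SimpleQueue

def square_worker(data_queue, result_queue):
    while True:
        tag, payload = data_queue.get()
        if tag == 'end':
            data_queue.put(('end', None))   # pass the sentinel on to siblings
            break
        result_queue.put(payload * payload)

if __name__ == '__main__':
    n_jobs = 3
    data_queue, result_queue = SimpleQueue(), SimpleQueue()
    for _ in range(n_jobs):
        mp.Process(target=square_worker,
                   args=(data_queue, result_queue)).start()

    presend, results = 0, []
    for block in range(10):
        if presend >= n_jobs:
            results.append(result_queue.get())  # collect one before sending more
        else:
            presend += 1
        data_queue.put(('middle', block))
    for _ in range(presend):
        results.append(result_queue.get())      # drain the blocks still in flight
    data_queue.put(('end', None))
    print(sorted(results))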
Example #10
    def modbus_start(self, ip, port):
        queue_receive = SimpleQueue()
        queue_send = SimpleQueue()
        Process(target=server.run_callback_server,
                args=(ip, port, queue_receive, queue_send)).start()

        while True:
            device, value = queue_receive.get()
            print("Registrador: %s -> Valor: %s" % (device, value))
            print(device, value)
            regType = device[0:2]
            regNum = device[2]

            # Booleans sent to the client (python -> labview)
            if (regType == "di"):
                queue_send.put([1, 0, 0, 1, 1, 0, 0, 1])

            # Booleans received from the client (labview -> python)
            if (regType == "co"):
                if (value[0] == 1):
                    self.measure.emit()

            # Ints sent to the client (python -> labview)
            if (regType == "ir"):
                self.inputReg.emit(regNum)
                # delay to make sure the variable has been updated
                sleep(0.1)
                queue_send.put([self.inputRegData])

            # Int received from the client (labview -> python)
            if (regType == "hr"):
                self.holdReg.emit(regNum, value[0])
Example #11
    def processDatabaseUpdate(
            databaseFileName: str,
            databaseUpdateQueue: multiprocessing.SimpleQueue) -> None:
        # Open database
        database = Database(databaseFileName)

        # Process updates
        while not databaseUpdateQueue.empty():
            # Get a task
            task = databaseUpdateQueue.get()
            taskName = task[0]

            # Update modification time
            if taskName == 'UpdateMtime':
                filePath = task[1]

                # Get file info
                fileInfo = database.getFile(filePath)
                # Update mtime
                fileInfo.stats['mtime'] = os.stat(filePath).st_mtime
                # Update database record
                database.setFile(fileInfo)

            # Remove a blob
            if taskName == 'RemoveBlob':
                blobId = task[1]

                # Remove blob
                database.removeBlob(blobId)

        # Close database
        database.commit()
        database.close()
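
A hedged usage sketch: tasks are plain tuples whose first element names the operation, and the queue is fully populated before the worker starts, which is why the drain loop above can rely on empty(). The paths and database name are illustrative, and processDatabaseUpdate is assumed to be callable as a plain function (e.g. a staticmethod).

from multiprocessing import Process, SimpleQueue

if __name__ == '__main__':
    databaseUpdateQueue = SimpleQueue()
    databaseUpdateQueue.put(('UpdateMtime', '/data/files/report.txt'))
    databaseUpdateQueue.put(('RemoveBlob', 'blob-0042'))

    updater = Process(target=processDatabaseUpdate,
                      args=('files.db', databaseUpdateQueue))
    updater.start()
    updater.join()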
Example #12
def BlobRemoveProcess(blobRemoveQueue: multiprocessing.SimpleQueue,
                      credentials: str, bucketName: str) -> None:
    # Get GCS bucket
    credentials = Credentials.from_service_account_info(
        json.loads(credentials))
    client = storage.Client(project=credentials.project_id,
                            credentials=credentials)
    bucket = client.get_bucket(bucketName)

    # Process tasks until received None
    while True:
        # Set process title
        setproctitle.setproctitle('BlobRemoveProcess')

        # Get task
        task = blobRemoveQueue.get()
        if task is None:
            break

        # Extract task
        name: str = task[0]

        # Update process title
        setproctitle.setproctitle('BlobRemoveProcess {}'.format(name))

        # Remove blob
        try:
            bucket.delete_blob(name)
        except Exception:
            print('Exception while deleting blob {}'.format(name))
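
A hedged usage sketch of the None-sentinel shutdown above: each worker exits when it pulls None, so the parent enqueues one sentinel per worker. The credentials JSON, bucket, and blob names are placeholders.

from multiprocessing import Process, SimpleQueue

if __name__ == '__main__':
    blobRemoveQueue = SimpleQueue()
    workers = [Process(target=BlobRemoveProcess,
                       args=(blobRemoveQueue, '{"project_id": "my-project"}',
                             'my-bucket'))
               for _ in range(2)]
    for w in workers:
        w.start()
    for name in ('stale/object-1', 'stale/object-2'):
        blobRemoveQueue.put((name,))   # a task is a tuple; task[0] is the name
    for _ in workers:
        blobRemoveQueue.put(None)      # one sentinel per worker
    for w in workers:
        w.join()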
Example #13
class Network:

	def __init__(self):
		self.cmd_queue = SimpleQueue()
		self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

		print(colored('[STCK]', 'grey'), colored('Network: Initializing.', 'white'))
	def connect(self, server_ip, server_port=8200):
		self.sock.connect((server_ip, server_port))

		print(colored('[STCK]', 'grey'), colored('Network: Connecting.', 'white'))
	def send(self, cmd):
		self.sock.send(('%s\n' % cmd).encode())
		#print('Network: Send %s' % cmd)

	def receive(self):
		recvData = self.sock.recv(1024).decode("utf8").split('\n')
		for i in range(len(recvData)):
			self.cmd_queue.put(recvData[i])

	def nextCmd(self):
		return self.cmd_queue.get()

	def hasCmd(self):
		return not self.cmd_queue.empty()

	def disconnect(self):
		print("Network: Closed")
		self.sock.close()
Example #14
    def worker(db, log_queue: multiprocessing.SimpleQueue, internal_log) -> None:
        db.connect()

        while True:
            item = log_queue.get()
            if item is None:
                break

            try:
                db.ensure_connected()
                db.insert_into('logs', item)
            except Exception as e:
                sleep_for = 10

                with internal_log.error() as target:
                    target.write('Could not save to database caused by: {0} {1}\n'.format(type(e), str(e)))
                    if 'open' in dir(db.conn):
                        target.write('Database handle state: {}\n'.format(db.conn.open))
                    target.write('Stack trace: ' + traceback.format_exc())
                    target.write('Current log: ')
                    json.dump(item, target)
                    target.write('\n\n')
                    target.write('Retry in {} s\n'.format(sleep_for))

                sleep(sleep_for)

                with internal_log.error() as target:
                    target.write('Retrying now.\n')
                    log_queue.put(item)
Example #15
 def call(*args, **kwargs):
     terminate = Event()
     queue = SimpleQueue()
     
     process = IProcess(target=Main,
                     args=(terminate, queue, func, args, kwargs),
                     daemon=daemon)
     process.start()
     
     #Get results/errors
     result, exception = queue.get()
     
     terminate.set()           # tell the worker process to wind down
     process.join(timeout=5)   # join takes a timeout in seconds
     while process.is_alive():
         process.terminate()
     
     #catch errors raised by the callback
     try:
         if callback is not None:
             callback(*cb_args, **cb_kwargs)
     except:
         if exception:
             result += (_format_tb(*sys.exc_info()),)
         else:
             result, exception = (_format_tb(*sys.exc_info()),), True
     
     if exception:
         _raise_tb_stack(result)
     else:
         return result
Example #16
def compile_model(model_file, output_file=None):
    """  Compile network in separate thread

    To avoid initialising Theano in main thread, compilation must be done in a
    separate process. The reason for avoidance is that forking a process after
    CUDA context is initialised is not supported, so, if this is a multiprocess
    run, processes must be created before importing theano.sandbox.cuda.

    Where the network is already compiled, a temporary copy is created.

    :param model_file: File to read network from
    :param output_file: File to output to.  If None, generate a filename

    :returns: A filename containing a compiled network.
    """
    queue = SimpleQueue()
    p = Process(target=_compile_model, args=(queue, model_file, output_file))
    p.start()
    p.join()
    if p.exitcode != 0:
        output_file = None
        raise ValueError("Model file {} was neither a network nor compiled network".format(model_file))
    else:
        output_file = queue.get()

    return output_file
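
The _compile_model helper is not shown; a hedged sketch of its contract, inferred from the parent above (the child puts exactly one filename on the queue and exits 0 on success; the body here is an assumption):

def _compile_model(queue, model_file, output_file):
    # Theano/CUDA imports belong here, after the fork, per the docstring above
    if output_file is None:
        output_file = model_file + '.compiled'   # assumed naming scheme
    # ... load the network from model_file and write the compiled copy ...
    queue.put(output_file)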
Example #17
def init_worker(status_queue: multiprocessing.SimpleQueue,
                param_queue: multiprocessing.SimpleQueue,
                result_queue: multiprocessing.SimpleQueue) -> None:
    global result
    global coverage_run
    global py_hash_secret
    global py_random_seed

    # Make sure the generator is re-seeded, as we have inherited
    # the seed from the parent process.
    py_random_seed = random.SystemRandom().randbytes(8)
    random.seed(py_random_seed)

    result = ChannelingTestResult(result_queue)
    if not param_queue.empty():
        server_addr, backend_dsn = param_queue.get()

        if server_addr is not None:
            os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = json.dumps(server_addr)
        if backend_dsn:
            os.environ['EDGEDB_TEST_BACKEND_DSN'] = backend_dsn

    os.environ['EDGEDB_TEST_PARALLEL'] = '1'
    coverage_run = devmode.CoverageConfig.start_coverage_if_requested()
    py_hash_secret = cpython_state.get_py_hash_secret()
    status_queue.put(True)
Example #18
def work(version_id, computer_id, index, size, busy_queue: SimpleQueue,
         result_queue: SimpleQueue, db: Database):
    time_limit = 100
    while True:
        agents, waypoints = busy_queue.get()

        start_time = time.time()
        run_id, grid_data = db.get_grid(version_id, computer_id, index,
                                        time_limit, agents, waypoints, size,
                                        20)
        overhead = time.time() - start_time

        res = None
        error = None

        try:
            res = func_timeout(time_limit,
                               run_single_from_data,
                               args=(grid_data, ))
        except FunctionTimedOut:
            pass
        except Exception as e:
            error = e

        result_queue.put((index, run_id, res, error, overhead))
        busy_queue.put((agents, waypoints))
Example #19
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish ipc queues using a manager process
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
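
run() assumes a _producer that streams tasks into task_queue and workers that stop on the None poison pill enqueued at shutdown. A hedged sketch of those two methods:

def _producer(self, tasks, task_queue):
    for task in tasks:
        task_queue.put(task)

def _worker(self, render, render_args, render_kwargs, task_queue, result_queue):
    while True:
        task = task_queue.get()
        if task is None:          # poison pill enqueued by run()
            break
        result_queue.put(render(task, *render_args, **render_kwargs))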
Example #20
def test_fastq_removal():

    inq = SimpleQueue()
    outq = SimpleQueue()

    with open(test_duplicates_path) as r:
        duplicates = ujson.load(r)
        duplicates_set = set(duplicates)


    rdrp = ReadDuplicateRemovalProcess(inq=inq, 
                                       outq=outq, 
                                       duplicated_ids=duplicates_set,
                                       )

    rdrp.start()
    
    inq.put(test_data)
    inq.put('END')
    result = outq.get()

    rdrp.join()
    rdrp.terminate()


    # Correct number of duplicates removed
    assert len(result)  == len(test_data_dedup)
    
    test_not_duplicated = [(r1.name, r2.name) for r1, r2 in result]
    expected_not_duplicated = [(r1.name, r2.name) for r1, r2 in test_data_dedup]

    # Correct duplicate names removed
    assert test_not_duplicated == expected_not_duplicated
Example #21
def test_tracer_usage_multiprocess():
    q = MPQueue()

    # Similar to test_multiprocess(), ensures that no collisions are
    # generated between parent and child processes while using
    # multiprocessing.

    # Note that we have to be wary of the size of the underlying
    # pipe in the queue: https://bugs.python.org/msg143081

    def target(q):
        ids_list = list(
            chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(10)])
        )
        q.put(ids_list)

    ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
    for p in ps:
        p.start()

    for p in ps:
        p.join()

    ids_list = list(chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)]))
    ids = set(ids_list)
    assert len(ids) == len(ids_list), "Collisions found in ids"

    while not q.empty():
        child_ids_list = q.get()
        child_ids = set(child_ids_list)

        assert len(child_ids) == len(child_ids_list), "Collisions found in subprocess ids"

        assert ids & child_ids == set()
        ids = ids | child_ids  # accumulate the ids
Example #22
def importData(simulator):
    testSim = simulator
    q = SimpleQueue()
    jobs = []
    PERIOD_SIZE = 50
    BATCH_SIZE = 100
    BATCH_COUNT = 1
    BATCH_OFFSET = 100
    dates = testSim.getAllDates()
    index = list(range(BATCH_COUNT))
    feed = []
    threads = 16

    running = False
    count = 0
    while True:
        if count < threads:
            for i in random.sample(index, threads-count if len(index) >= threads-count else len(index)):
                p = Process(target=testSim.processTimePeriod, args=(q, PERIOD_SIZE, dates, BATCH_SIZE * (i + BATCH_OFFSET) + PERIOD_SIZE, BATCH_SIZE))
                jobs.append(p)
                p.start() 
                index.remove(i)
        count = 0
        for p in jobs[:]:  # iterate over a copy; removing from jobs mid-loop would skip items
            if not p.is_alive():
                p.terminate()
                jobs.remove(p)
            else:
                count += 1
        while not q.empty():
            print('Getting')
            feed.append(q.get())
        if count == 0 and len(index) == 0: 
            break
    return feed
Example #23
def _pin_memory_loop(in_queue: multiprocessing.SimpleQueue,
                     out_queue: queue.Queue, done_event: threading.Event):
    while True:
        try:
            r = in_queue.get()
        except Exception:
            if done_event.is_set():
                return
            raise
        if r is None:
            break
        if isinstance(r[1], torch_loader.ExceptionWrapper):
            out_queue.put(r)
            continue

        idx, batch_content = r
        batch_indices = batch_content[0]
        batch = batch_content[1:]

        try:
            batch = torch_loader.pin_memory_batch(batch)
        except Exception:
            out_queue.put((idx, torch_loader.ExceptionWrapper(sys.exc_info())))
        else:
            out_queue.put((idx, [batch_indices] + batch))
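
A hedged sketch of how this loop is typically wired up: a daemon thread shuttles batches from the worker processes' multiprocessing.SimpleQueue onto a thread-local queue.Queue once they are pinned, so the training loop never blocks on inter-process IPC directly.

import multiprocessing
import queue
import threading

in_queue = multiprocessing.SimpleQueue()
out_queue = queue.Queue()
done_event = threading.Event()
pin_thread = threading.Thread(target=_pin_memory_loop,
                              args=(in_queue, out_queue, done_event),
                              daemon=True)
pin_thread.start()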
Example #24
def test_span_api_fork():
    q = MPQueue()
    pid = os.fork()

    if pid > 0:
        # parent
        parent_ids_list = list(
            chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
        )
        parent_ids = set(parent_ids_list)
        assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"

        child_ids_list = q.get()

        child_ids = set(child_ids_list)

        assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
        assert parent_ids & child_ids == set()
    else:
        # child
        try:
            child_ids = list(
                chain.from_iterable((s.span_id, s.trace_id) for s in [Span(None, None) for _ in range(100)])
            )
            q.put(child_ids)
        finally:
            os._exit(0)
Example #25
def test_fastq_parsing():

    inq = SimpleQueue()
    outq = SimpleQueue()

    rdpp = ReadDeduplicationParserProcess(inq=inq, 
                                          outq=outq, 
                                          save_hashed_dict_path=test_json_path)

    rdpp.start()
    
    inq.put(test_data)
    inq.put('END')
    _ = outq.get()

    rdpp.join()
    rdpp.terminate()

    with open(test_json_path) as r:
        result = ujson.load(r)


    hash_function = functools.partial(xxhash.xxh64_intdigest, seed=42)
    result_expected = {str(hash_function(r1.name + r2.name)): hash_function(r1.sequence + r2.sequence) for r1, r2 in test_data}


    assert len(result) == len(test_data)
    assert result == result_expected
    assert len({x for x in result.values()}) == 2 
Example #26
def test_multiprocess():
    q = MPQueue()

    def target(q):
        assert sum((_ is _rand.seed for _ in forksafe._registry)) == 1
        q.put([_rand.rand64bits() for _ in range(100)])

    ps = [mp.Process(target=target, args=(q,)) for _ in range(30)]
    for p in ps:
        p.start()

    for p in ps:
        p.join()
        assert p.exitcode == 0

    ids_list = [_rand.rand64bits() for _ in range(1000)]
    ids = set(ids_list)
    assert len(ids_list) == len(ids), "Collisions found in ids"

    while not q.empty():
        child_ids_list = q.get()
        child_ids = set(child_ids_list)

        assert len(child_ids_list) == len(child_ids), "Collisions found in subprocess ids"

        assert ids & child_ids == set()
        ids = ids | child_ids  # accumulate the ids
Example #27
def test_tracer_usage_fork():
    q = MPQueue()
    pid = os.fork()

    # Similar test to test_fork() above except we use the tracer API.
    # In this case we expect to never have collisions.
    if pid > 0:
        # parent
        parent_ids_list = list(
            chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
        )
        parent_ids = set(parent_ids_list)
        assert len(parent_ids) == len(parent_ids_list), "Collisions found in parent process ids"

        child_ids_list = q.get()

        child_ids = set(child_ids_list)

        assert len(child_ids) == len(child_ids_list), "Collisions found in child process ids"
        assert parent_ids & child_ids == set()
    else:
        # child
        try:
            child_ids = list(
                chain.from_iterable((s.span_id, s.trace_id) for s in [tracer.start_span("s") for _ in range(100)])
            )
            q.put(child_ids)
        finally:
            # Kill the process so it doesn't continue running the rest of the
            # test suite in a separate process. Note we can't use sys.exit()
            # as it raises an exception that pytest will detect as an error.
            os._exit(0)
Example #28
def test_fork_pid_check():
    q = MPQueue()
    pid = os.fork()

    # Generate random numbers in the parent and child processes after forking.
    # The child sends back their numbers to the parent where we check to see
    # if we get collisions or not.
    if pid > 0:
        # parent
        rns = {_rand.rand64bits(check_pid=True) for _ in range(100)}
        child_rns = q.get()

        if PYTHON_VERSION_INFO >= (3, 7):
            # Python 3.7+ have fork hooks which should be used
            # Hence we should not get any collisions
            assert rns & child_rns == set()
        else:
            # Python < 3.7 we have the pid check so there also
            # should not be any collisions.
            assert rns & child_rns == set()

    else:
        # child
        try:
            rngs = {_rand.rand64bits(check_pid=True) for _ in range(100)}
            q.put(rngs)
        finally:
            # Kill the process so it doesn't continue running the rest of the
            # test suite in a separate process. Note we can't use sys.exit()
            # as it raises an exception that pytest will detect as an error.
            os._exit(0)
Example #29
    def __init__(self, queue: SimpleQueue, address: str, port: int,
                 entWin: Window):
        """Initialize a level generator

        conn: Used to communicate with the Chase
        address: Connection address to start a new mcpi connection
        port: Connection port to start a new mcpi connection
        entrance: The entrance window to begin level with
        """
        self.queue = queue
        self.mc = mmc.Minecraft.create(address, port)
        self.entWin = entWin
        self._construct()
        self.players = []
        while True:
            while not queue.empty():
                rec: Tuple[Cmd, List] = queue.get()  # New msg
                if rec[0] == Cmd.TERM:
                    self._cleanup()
                    return
                elif rec[0] == Cmd.ENT:
                    self.players.extend(rec[1])
                elif rec[0] == Cmd.EXI:
                    for i in rec[1]:
                        try:
                            self.players.remove(i)
                        except ValueError:
                            sys.stderr.write(
                                f"Player(id) {i} not found in {self}!")
            self._loop()
Example #30
File: asset.py Project: mxie91/avocado
    def run(self, runnable):
        # pylint: disable=W0201
        self.runnable = runnable
        yield self.prepare_status("started")

        name = self.runnable.kwargs.get("name")
        # if name was passed correctly, run the Avocado Asset utility
        if name is not None:
            asset_hash = self.runnable.kwargs.get("asset_hash")
            algorithm = self.runnable.kwargs.get("algorithm")
            locations = self.runnable.kwargs.get("locations")
            expire = self.runnable.kwargs.get("expire")
            if expire is not None:
                expire = data_structures.time_to_seconds(str(expire))

            cache_dirs = self.runnable.config.get("datadir.paths.cache_dirs")
            if cache_dirs is None:
                cache_dirs = settings.as_dict().get("datadir.paths.cache_dirs")

            # let's spawn it to another process to be able to update the
            # status messages and avoid the Asset to lock this process
            queue = SimpleQueue()
            process = Process(
                target=self._fetch_asset,
                args=(
                    name,
                    asset_hash,
                    algorithm,
                    locations,
                    cache_dirs,
                    expire,
                    queue,
                ),
            )
            process.start()

            while queue.empty():
                time.sleep(RUNNER_RUN_STATUS_INTERVAL)
                yield self.prepare_status("running")

            output = queue.get()
            result = output["result"]
            stdout = output["stdout"]
            stderr = output["stderr"]
        else:
            # Otherwise, log the missing package name
            result = "error"
            stdout = ""
            stderr = 'At least name should be passed as kwargs using name="uri".'

        yield self.prepare_status("running", {
            "type": "stdout",
            "log": stdout.encode()
        })
        yield self.prepare_status("running", {
            "type": "stderr",
            "log": stderr.encode()
        })
        yield self.prepare_status("finished", {"result": result})
Example #31
def learning_loop(exit_flag: mp.Value, gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA, main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue, gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs, action, action_prob, xp, obsp, actionp,
                     action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp) for g in gs] for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Example #32
    def _poll_config_changes(self, config_queue: SimpleQueue):
        """ Polls configuration changes from the dedicated queue and reloads filters when they happen

        :param SimpleQueue config_change_queue: queue for passing configuration changes
        """
        while True:
            config_data = config_queue.get()
            self.build_probe_config(config_data, hotswap_only=True)
            self.reload_filters()
Example #33
class AudioProcessingThread(threading.Thread):
    """
    Chirp Connect audio processing thread
    """
    DEBUG_AUDIO_FILENAME = 'chirp_audio.wav'

    def __init__(self, parent=None, *args, **kwargs):
        """
        Initialise audio processing.
        In debug mode, the audio data is saved to file.
        """
        self.sdk = parent.sdk
        self.sample_size = parent.sample_size
        self.block_size = parent.block_size
        self.sample_format = parent.sample_format
        self.process_input_fn = parent.process_input_fn
        self.sample_rate = float(parent.sdk.sample_rate)

        self.block_period = self.block_size / self.sample_rate or 0.1
        self.wav_filename = parent.wav_filename or self.DEBUG_AUDIO_FILENAME
        self.input_queue = SimpleQueue()
        super(AudioProcessingThread, self).__init__(*args, **kwargs)

        if self.sdk.debug:
            import soundfile as sf
            self.wav_file = sf.SoundFile(self.wav_filename,
                                         mode='w',
                                         channels=1,
                                         samplerate=self.sdk.sample_rate)

        self.daemon = True
        self.start()

    def run(self):
        """
        Continuously process any input data from circular buffer.
        Note: We need to sleep as much as possible in this thread
        to restrict CPU usage.
        """
        while self.is_alive():

            tstart = time.time()
            while not self.input_queue.empty():
                data = self.input_queue.get()
                self.process_input_fn(data)
                if self.sdk.debug and not self.wav_file.closed:
                    self.wav_file.buffer_write(data, dtype=self.sample_size)
                self.block_period = self.block_size / self.sample_rate

            tsleep = (self.block_period - ((time.time() - tstart)))
            if tsleep > 0:
                time.sleep(tsleep)

    def stop(self):
        """ In debug mode, close wav file """
        if self.sdk.debug:
            self.wav_file.close()
Example #34
File: main.py Project: Yelp/pidtree-bcc
def main(args: argparse.Namespace):
    global EXIT_CODE
    probe_workers = []
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    curried_handler = partial(termination_handler, probe_workers)
    for s in HANDLED_SIGNALS:
        signal.signal(s, curried_handler)
    config_watcher = setup_config(
        args.config,
        watch_config=args.watch_config,
        min_watch_interval=args.health_check_period,
    )
    out = smart_open(args.output_file, mode='w')
    output_queue = SimpleQueue()
    probes = load_probes(
        output_queue,
        args.extra_probe_path,
        args.extra_plugin_path,
        args.lost_event_telemetry,
    )
    logging.info('Loaded probes: {}'.format(', '.join(probes)))
    if args.print_and_quit:
        for probe_name, probe in probes.items():
            print('----- {} -----'.format(probe_name))
            print(probe.expanded_bpf_text)
            print('\n')
        sys.exit(0)
    for probe in probes.values():
        probe_workers.append(
            Process(target=deregister_signals(probe.start_polling)))
        probe_workers[-1].start()
    stop_wrapper = StopFlagWrapper()
    watchdog_thread = Thread(
        target=health_and_config_watchdog,
        args=(probe_workers, out, stop_wrapper, config_watcher,
              args.health_check_period),
        daemon=True,
    )
    watchdog_thread.start()
    try:
        while True:
            print(output_queue.get(), file=out)
            out.flush()
    except RestartSignal:
        stop_wrapper.stop()
        raise
    except Exception as e:
        # Terminate everything if something goes wrong
        EXIT_CODE = 1
        logging.error('Encountered unexpected error: {}'.format(e))
        for worker in probe_workers:
            worker.terminate()
    sys.exit(EXIT_CODE)
Example #35
File: utils.py Project: Hawks12/pybuilder
def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target
        )
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support

        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2],
                "".join(traceback.format_tb(result.args[3])),
            )
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
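
A hedged usage example: the child's return value comes back through the SimpleQueue, and an exception raised in the child is re-raised in the parent with its traceback preserved via tblib. The logger and the risky function are stand-ins.

import logging

def risky(x):
    if x < 0:
        raise ValueError("negative input")
    return x * 2

logger = logging.getLogger(__name__)
exit_code, value = fork_process(logger, target=risky, args=(21,))
print(exit_code, value)                          # -> 0 42
fork_process(logger, target=risky, args=(-1,))   # ValueError re-raised here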
Example #36
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Example #37
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):
    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')

        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Example #38
    def _open_frontend(self):
        from multiprocessing import Process, SimpleQueue

        connection = SimpleQueue()
        frontend = Process(
            target=self._open_frontend_process,
            args=(connection, [k for k in sys.argv[1:] if k != "--frontend"]))
        frontend.start()
        cmdline = connection.get()
        frontend.join()
        if self.interactive:
            argv_backup = list(sys.argv)
        sys.argv[1:] = cmdline.split()
        Main.setup_argv(True, True)
        if self.interactive:
            sys.argv = argv_backup
        print("Running with the following command line: %s" % sys.argv)
Example #39
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        data = gvf2plot.get()

        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Example #40
File: _export.py Project: AtnNn/rethinkdb
def run_clients(options, workingDir, db_table_set):
    # Spawn one client for each db.table, up to options.clients at a time
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)
    hook_counter = multiprocessing.Value(ctypes.c_longlong, 0)
    
    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = []

    try:
        progress_info = []
        arg_lists = []
        for db, table in db_table_set:
            
            tableSize = int(options.retryQuery("count", query.db(db).table(table).info()['doc_count_estimates'].sum()))
            
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
                                  multiprocessing.Value(ctypes.c_longlong, tableSize)))
            arg_lists.append((db, table,
                              workingDir,
                              options,
                              error_queue,
                              progress_info[-1],
                              sindex_counter,
                              hook_counter,
                              exit_event,
                              ))


        # Wait for all tables to finish
        while processes or arg_lists:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set() # Stop immediately if an error occurs
                errors.append(error_queue.get())

            processes = [process for process in processes if process.is_alive()]

            if len(processes) < options.clients and len(arg_lists) > 0:
                newProcess = multiprocessing.Process(target=export_table, args=arg_lists.pop(0))
                newProcess.start()
                processes.append(newProcess)

            update_progress(progress_info, options)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set() and not options.quiet:
            utils_common.print_progress(1.0, indent=4)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        if not options.quiet:
            print("\n    %s exported from %s, with %s, and %s" %
                  (plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
                   plural(len(db_table_set), "table", "tables"),
                   plural(sindex_counter.value, "secondary index", "secondary indexes"),
                   plural(hook_counter.value, "hook function", "hook functions")
            ))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options.debug:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Example #41
File: base.py Project: 01org/luv-yocto
    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))
Example #42
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Example #43
def scan_regionset(regionset, options):
    """ This function scans all te region files in a regionset object
    and fills the ScannedRegionFile obj with the results
    """

    total_regions = len(regionset.regions)
    total_chunks = 0
    corrupted_total = 0
    wrong_total = 0
    entities_total = 0
    too_small_total = 0
    unreadable = 0

    # init progress bar
    if not options.verbose:
        pbar = progressbar.ProgressBar(
            widgets=['Scanning: ', FractionWidget(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(left='[',right=']'), ' ', progressbar.ETA()],
            maxval=total_regions)

    # queue used by processes to pass finished stuff
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes=options.processes,
            initializer=_mp_pool_init,initargs=(regionset,options,q))

    if not options.verbose:
        pbar.start()

    # start the pool
    # Note to self: every child process has its own memory space,
    # which means every obj received by it will be a copy of the
    # main obj
    result = pool.map_async(multithread_scan_regionfile, regionset.list_regions(None), max(1,total_regions//options.processes))

    # printing status
    region_counter = 0

    while not result.ready() or not q.empty():
        time.sleep(0.01)
        if not q.empty():
            r = q.get()
            if r is None:
                # something went wrong scanning this region file;
                # probably a bug... don't know if it's a good
                # idea to skip it
                continue
            if not isinstance(r, world.ScannedRegionFile):
                raise ChildProcessException(r)
            else:
                corrupted, wrong, entities_prob, shared_offset, num_chunks = r.get_counters()
                filename = r.filename
                # the obj returned is a copy, overwrite it in regionset
                regionset[r.get_coords()] = r
                corrupted_total += corrupted
                wrong_total += wrong
                total_chunks += num_chunks
                entities_total += entities_prob
                if r.status == world.REGION_TOO_SMALL:
                    too_small_total += 1
                elif r.status == world.REGION_UNREADABLE:
                    unreadable += 1
                region_counter += 1
                if options.verbose:
                    if r.status == world.REGION_OK:
                        stats = "(c: {0}, w: {1}, tme: {2}, so: {3}, t: {4})".format(corrupted, wrong, entities_prob, shared_offset, num_chunks)
                    elif r.status == world.REGION_TOO_SMALL:
                        stats = "(Error: not a region file)"
                    elif r.status == world.REGION_UNREADABLE:
                        stats = "(Error: unreadable region file)"
                    print("Scanned {0: <12} {1:.<43} {2}/{3}".format(filename, stats, region_counter, total_regions))
                else:
                    pbar.update(region_counter)

    if not options.verbose: pbar.finish()

    regionset.scanned = True
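Note how the queue reaches the pool workers through the initializer rather than as a map() argument: multiprocessing queues may only be shared by inheritance, so pickling one into a task fails. A minimal sketch of that injection pattern (names are illustrative):

import multiprocessing
from multiprocessing import SimpleQueue

q = None  # filled in per worker by the initializer

def _mp_pool_init(queue):
    global q
    q = queue  # keep the inherited queue for use inside tasks

def scan_one(region):
    q.put("scanned region {}".format(region))
    return region

if __name__ == '__main__':
    queue = SimpleQueue()
    pool = multiprocessing.Pool(processes=2,
                                initializer=_mp_pool_init,
                                initargs=(queue,))
    result = pool.map_async(scan_one, range(4))
    for _ in range(4):
        print(queue.get())  # progress arrives as each region finishes
    result.wait()
    pool.close()
    pool.join()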
Example #44
File: client.py Project: ATRAN2/Futami
class InternalClient(Client):
    """This client is a fake client which is responsible for firing off
    all messages from the update notification side, and handling the
    routing of those messages to users watching.

    It does not have a socket, so it should not be included in the
    server's clients dictionary.
    """

    def __init__(self, server, nickname, user, host='localhost'):
        self.server = server
        self.nickname = nickname
        self.realname = nickname
        self.user = user
        self.host = host

        self._readbuffer = ""
        self._writebuffer = ""
        self.request_queue = SimpleQueue()
        self.response_queue = SimpleQueue()

        # dict of board => list of users
        self.board_watchers = defaultdict(list)

        # dict of board, thread => list of users
        self.thread_watchers = defaultdict(lambda: defaultdict(list))

        Process(
            target=Ami,
            name='immediate api worker',
            args=(self.request_queue, self.response_queue)
        ).start()

    def loop_hook(self):
        while not self.response_queue.empty():
            result = self.response_queue.get()

            # Handle exceptions in-band from child workers here.
            if isinstance(result, StoredException):
                print(result.traceback)
                raise RuntimeError(
                    "Exception caught from worker '{}', see above for "
                    "exception details".format(result.process))

            logger.debug("read from response queue {}".format(result))

            send_as = "/{}/{}".format(result.board, result.post_no)

            # Initial channel loads have identifiers, use them to find out
            # where to go
            if result.identifier:
                client, channel, target = result.identifier
                client = self.server.get_client(client)
                logger.debug("initial channel load, using identitifier info: sending to {} on {}".format(client, channel))

                if isinstance(target, BoardTarget):
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )
                    continue
                elif isinstance(target, ThreadTarget):
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
                    continue

            if result.is_reply:  # Send to thread channel
                channel = "#/{}/{}".format(result.board, result.reply_to)
                logger.debug("sending reply to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.thread_watchers[result.board][result.reply_to]:
                    logger.debug("sending reply to {}".format(client))
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
            else:
                channel = "#/{}/".format(result.board)
                logger.debug("sending thread update to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.board_watchers[result.board]:
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )

    def _parse_prefix(self, prefix):
        m = re.search(
            ":(?P<nickname>[^!]*)!(?P<username>[^@]*)@(?P<host>.*)",
            prefix
        )
        return m.groupdict()

    @property
    def socket(self):
        raise AttributeError('InternalClients have no sockets')

    def message(self, message):
        pass
        # prefix, message = message.split(" ", 1)

        # prefix = self._parse_prefix(prefix)

        # self.sending_client = self.server.get_client(prefix['nickname'])

        # self._readbuffer = message + '\r\n'
        # self._parse_read_buffer()

    def client_joined(self, client, channel):
        logger.debug("InternalClient handling {} joined {}".format(client, channel))

        channel_registration_map = {
            r'#/(.+)/$': self._client_register_board,
            r'#/(.+)/(\d+)$': self._client_register_thread,
        }

        matched_registration = False

        for regex, register_method in channel_registration_map.items():
            m = re.match(regex, channel.name)
            if m:
                register_method(client, channel, *m.groups())
                matched_registration = True
                break

        if not matched_registration:
            self._send_message(
                client, channel.name,
                "This channel ({}) doesn't look like a board. Nothing will happen in this channel.".format(channel.name)
            )
            return

    def _handle_command(self, command, arguments):
        # sending_client = self.sending_client
        # self.sending_client = None

        # Add handling here for actual input from users other than joins
        pass

    def _client_register_board(self, client, channel, board):
        logger.debug("registering to board: {}, {}, {}".format(client, channel, board))

        slash_board = '/{}/'.format(board)
        self._send_message(
            client, channel.name,
            "Welcome to {}, loading threads...".format(slash_board),
            sending_nick=slash_board,
        )

        target = BoardTarget(board)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        self.board_watchers[board].append(client)

    def _client_register_thread(self, client, channel, board, thread):
        logger.debug("registering to thread: {}, {}, {}, {}".format(client, channel, board, thread))

        slash_board_thread = '/{}/{}'.format(board, thread)

        self._send_message(
            client, channel.name,
            "Welcome to >>>{}, loading posts...".format(slash_board_thread),
            sending_nick=slash_board_thread,
        )

        target = ThreadTarget(board, thread)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        # Thread reply_tos are ints when they come back from the API
        self.thread_watchers[board][int(thread)].append(client)

    def _send_message(self, client, channel, message, sending_nick=None):
        if sending_nick:
            real_nick = self.nickname
            self.nickname = sending_nick

        client.message(
            ":{} PRIVMSG {} :{}".format(
                self.prefix,
                channel,
                message,
            )
        )

        if sending_nick:
            self.nickname = real_nick
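The class above pairs a request queue with a response queue around a single worker process, and ships worker exceptions in-band so loop_hook can surface them. A minimal sketch of that two-queue pattern (all names here are illustrative, not the Futami API):

import traceback
from multiprocessing import Process, SimpleQueue

def api_worker(request_queue, response_queue):
    while True:
        request = request_queue.get()
        if request is None:  # sentinel: shut down
            return
        try:
            response_queue.put(request.upper())
        except Exception:
            # tracebacks don't pickle; stringify before queueing
            response_queue.put(traceback.format_exc())

if __name__ == '__main__':
    request_queue, response_queue = SimpleQueue(), SimpleQueue()
    worker = Process(target=api_worker,
                     args=(request_queue, response_queue))
    worker.start()
    request_queue.put("hello")
    print(response_queue.get())  # HELLO
    request_queue.put(None)
    worker.join()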
Example #45
class QueuePool(object):
	Process = QueueProcess

	def __init__(self, callback, pool_size=1, check_interval=2):
		self.task_queue = SimpleQueue()
		self.result_queue = SimpleQueue()
		self._callback = callback
		self._pool = {}  # {process_name: process}
		self._tasks = {}  # {task_id: process_name}
		for _ in range(pool_size):
			process = self.Process(self.task_queue, self.result_queue)
			self._pool[process.name] = process
			process.start()
		# Check for progress periodically. TODO: stop the timer when the queue is empty!
		self.timer = QTimer()
		self.timer.timeout.connect(self._check_for_results)
		self.timer.start(check_interval * 1000)

	def _check_for_results(self):
		while not self.result_queue.empty():
			process_name, task_id, result_object, is_exception, is_ready = self.result_queue.get()
			if is_ready or is_exception:
				if task_id in self._tasks:
					del self._tasks[task_id]
			else:
				self._tasks[task_id] = process_name
			self._callback(task_id, result_object, is_exception, is_ready)

	def change_check_interval(self, new_interval_in_seconds):
		try:
			interval = float(new_interval_in_seconds)
		except ValueError:
			return
		self.timer.stop()
		self.timer.start(int(interval * 1000))  # QTimer.start() takes whole milliseconds

	def change_pool_size(self, new_pool_size):
		try:
			diff = int(new_pool_size) - len(self._pool)
		except ValueError:
			return
		if diff < 0:
			for _ in range(abs(diff)):
				process_name, process = self._pool.popitem()
				process.soft_interrupt.set()
		else:
			for _ in range(diff):
				process = self.Process(self.task_queue, self.result_queue)
				self._pool[process.name] = process
				process.start()

	def add_task(self, task_id, *params):
		self.task_queue.put([task_id] + list(params))

	def cancel_task(self, task_id):
		process_name = self._tasks.get(task_id)
		if process_name is None:
			# task is not active, but it might still be in task_queue, from which it shall be removed
			task_objects = []
			while not self.task_queue.empty():
				task_objects.append(self.task_queue.get())
			for obj in task_objects:
				if task_id != obj[0]:
					self.task_queue.put(obj)
			return
		process = self._pool.get(process_name)
		if process is None:
			# process might be already stopped -> ignore for now
			return
		process.hard_interrupt.set()

	def shutdown(self):
		for process in self._pool.values():
			process.hard_interrupt.set()
			self.task_queue.put(None)  # unblock queue

	def terminate(self):
		for process in self._pool.values():
			if process.exitcode is None:
				process.terminate()
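Because SimpleQueue has no way to remove a single item, cancel_task above drains the whole queue and re-enqueues everything except the cancelled task. A standalone sketch of that drain-and-requeue trick (illustrative names):

from multiprocessing import SimpleQueue

def cancel_queued_task(task_queue, task_id):
    kept = []
    while not task_queue.empty():      # racy if workers consume concurrently;
        kept.append(task_queue.get())  # the pool accepts that trade-off
    for obj in kept:
        if obj[0] != task_id:
            task_queue.put(obj)

task_queue = SimpleQueue()
for tid in ('a', 'b', 'c'):
    task_queue.put([tid])
cancel_queued_task(task_queue, 'b')
while not task_queue.empty():
    print(task_queue.get())  # ['a'] then ['c']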
Example #46
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [g.data(xi, obs, action, xpi, obsp)
                 for gs, xi, xpi in zip(gvfs, x, xp)
                 for g in gs]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
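The loop above never blocks on main2gvf.get() directly; it polls empty() with a short sleep so it can keep honoring exit_flag. That idiom can be factored into a helper, sketched here under the same assumptions (exit_flag is an mp.Value set nonzero to request shutdown):

import time

def get_or_exit(queue, exit_flag, poll=0.01):
    """Blocking get() that still honors the shared exit flag."""
    while exit_flag.value == 0:
        if not queue.empty():
            return queue.get()
        time.sleep(poll)
    return None  # shutdown requested before any data arrived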
Example #47
            while True:
                try:
                    if not self.send_queue.empty():  # If there is something to send
                        message = self.send_queue.get()
                        message = str(len(message.encode())).zfill(4) + message  # Prefix the four-digit byte count
                        connection.send(message.encode())
                        print("Sent message")
                    size_prefix = connection.recv(4).decode()  # Receive the four-byte size prefix
                    if size_prefix == '':  # If disconnected
                        print("Connection finished")
                        break
                    if size_prefix:  # If a message was received
                        self.recv_queue.put(connection.recv(int(size_prefix)).decode())
                        print("Received message")
                except Exception as error:
                    if getattr(error, 'errno', None) != EWOULDBLOCK:  # Ignore errors caused by the non-blocking socket
                        print(error)


# Set up process stuff

recv_queue = SimpleQueue()
send_queue = SimpleQueue()

server = Server(send_queue, recv_queue)
server.start()

while True:
    send_queue.put("mayonaise")
    print(recv_queue.get())
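The fixed four-digit length prefix is what turns the raw stream into discrete messages. A hedged sketch of that framing in isolation (no sockets, just bytes):

import io

def frame(message):
    data = message.encode()
    return str(len(data)).zfill(4).encode() + data  # e.g. b'0009mayonaise'

def unframe(stream):
    size = int(stream.read(4).decode())  # fixed-width size prefix
    return stream.read(size).decode()

buf = io.BytesIO(frame("mayonaise"))
print(unframe(buf))  # mayonaise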
Example #48
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     gvf2main: mp.SimpleQueue,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # read_all = [0x02,  # read
    #             0x00,  # starting from the beginning
    #             0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # send action and features to GVFs
            gvf_data = (obs, active_pts)
            main2gvf.put(gvf_data)

            # get action control GVFs
            action = gvf2main.get()

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Example #49
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)

    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = []

    try:
        sizes = get_all_table_sizes(options["host"], options["port"], options["auth_key"], db_table_set)

        progress_info = []

        arg_lists = []
        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
                                  multiprocessing.Value(ctypes.c_longlong, sizes[(db, table)])))
            arg_lists.append((options["host"],
                              options["port"],
                              options["auth_key"],
                              db, table,
                              options["directory_partial"],
                              options["fields"],
                              options["delimiter"],
                              options["format"],
                              error_queue,
                              progress_info[-1],
                              sindex_counter,
                              exit_event))

        # Wait for all tables to finish
        while len(processes) > 0 or len(arg_lists) > 0:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set() # Stop immediately if an error occurs
                errors.append(error_queue.get())

            processes = [process for process in processes if process.is_alive()]

            if len(processes) < options["clients"] and len(arg_lists) > 0:
                processes.append(multiprocessing.Process(target=export_table,
                                                         args=arg_lists.pop(0)))
                processes[-1].start()

            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        print("")
        print("%s exported from %s, with %s" %
              (plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
               plural(len(db_table_set), "table", "tables"),
               plural(sindex_counter.value, "secondary index", "secondary indexes")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")