Code Example #1
    def __init__(self,
                 config,
                 share_batches=True,
                 manager=None,
                 new_process=True):
        if new_process and manager is None:
            manager = Manager()
        self.knows = Semaphore(0)  # > 0 if we know if any are coming
        # == 0 if DatasetReader is processing a command
        self.working = Semaphore(1 if new_process else 100)
        self.finished_reading = Lock()  # locked if we're still reading from file
        # number of molecules that have been sent to the pipe:
        self.in_pipe = Value('i', 0)

        # Tracking what's already been sent through the pipe:
        self._example_number = Value('i', 0)

        # The final kill switch:
        self._close = Value('i', 0)

        self.command_queue = manager.Queue(10)
        self.molecule_pipeline = None
        self.batch_queue = Queue(config.data.batch_queue_cap)  # manager.Queue(config.data.batch_queue_cap)
        self.share_batches = share_batches

        self.dataset_reader = DatasetReader("dataset_reader",
                                            self,
                                            config,
                                            new_process=new_process)
        if new_process:
            self.dataset_reader.start()
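Example #1 hands the Manager-backed command_queue and a batch queue to a DatasetReader process. The sketch below is a minimal, self-contained illustration of that handshake; the names command_worker and STOP are hypothetical and none of the real DatasetReader logic is shown.

# Minimal sketch of the queue handshake used above; command_worker and STOP are
# illustrative names, not part of the original project.
from multiprocessing import Manager, Process

STOP = "stop"  # sentinel telling the worker to exit

def command_worker(command_queue, batch_queue):
    # Consume commands until the sentinel arrives, pushing results back to the parent.
    while True:
        command = command_queue.get()
        if command == STOP:
            break
        batch_queue.put("processed:" + command)

if __name__ == "__main__":
    manager = Manager()
    command_queue = manager.Queue(10)
    batch_queue = manager.Queue(10)
    worker = Process(target=command_worker, args=(command_queue, batch_queue))
    worker.start()
    command_queue.put("read_batch")
    print(batch_queue.get())  # -> "processed:read_batch"
    command_queue.put(STOP)
    worker.join()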
Code Example #2
    def eval_init(self):
        self.processes = []
        M = Manager()
        self.res_Q = M.Queue()
        self.sync_Q = []
        for idx in range(self.process_num):
            print('process', idx)
            syncq = M.Queue()
            p = EvalProcess(self.sub_envs_args[idx], self.global_model,
                            self.res_Q, syncq, idx)

            self.sync_Q.append(syncq)
            self.processes.append(p)
        
        for p in self.processes:
            p.start()
Code Example #3
    def run(self):
        m = Manager()
        global_ep, res_dict = m.dict(), m.dict()
        global_ep['g_ep'] = 0
        global_ep['g_sum_step'] = 0
        res_dict['res_q'] = []
        rew_queue = m.Queue()
        agent_input_dict_list = m.list([self.input_dict for _ in range(1)])
        agent_return_dict_list = m.list([m.dict() for _ in range(1)])

        a3c_workers = [
            A3CWorker(global_ep, res_dict, rew_queue, agent_input_dict_list[i],
                      agent_return_dict_list[i], i) for i in range(1)
        ]

        for w in a3c_workers:
            w.start()
        for w in a3c_workers:
            w.join()

        res = res_dict['res_q']
        print('game_result:', res)
        for agent_return_dict in agent_return_dict_list:
            print(agent_return_dict)

        win_rate, tie_rate, lose_rate, step_game = self.calculate_statistics(
            agent_return_dict_list)
        print(win_rate, tie_rate, lose_rate, step_game)
        self.return_dict[int(self.nth_pbt_process)] = [win_rate, step_game]
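A worker skeleton consistent with how run() above consumes its shared objects might look like the following; everything beyond the constructor arguments visible in run() is an assumption, and the actual A3C training step is omitted.

# Skeleton only: matches the constructor arguments and shared objects used by run()
# above, but the training logic and any names not visible there are assumptions.
from multiprocessing import Process

class A3CWorker(Process):
    def __init__(self, global_ep, res_dict, rew_queue, input_dict, return_dict, worker_id):
        super().__init__()
        self.global_ep = global_ep      # manager dict with 'g_ep' and 'g_sum_step'
        self.res_dict = res_dict        # manager dict whose 'res_q' list collects results
        self.rew_queue = rew_queue
        self.input_dict = input_dict
        self.return_dict = return_dict  # per-worker manager dict read by calculate_statistics
        self.worker_id = worker_id

    def run(self):
        # ... play episodes and train here ...
        self.global_ep['g_ep'] += 1
        self.global_ep['g_sum_step'] += 1
        # In-place mutation of a plain list stored in a manager dict is not propagated,
        # so reassign the whole value when appending a result.
        self.res_dict['res_q'] = self.res_dict['res_q'] + [('worker', self.worker_id)]
        self.return_dict['episodes_played'] = 1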
Code Example #4
    def __call__(self,
                 instances: Iterable[Instance],
                 num_epochs: int = None,
                 shuffle: bool = True) -> Iterator[TensorDict]:

        # If you run it forever, the worker processes won't shut down correctly.
        # TODO(joelgrus) find a solution for this
        if num_epochs is None:
            raise ConfigurationError(
                "Multiprocess Iterator must be run for a fixed number of epochs"
            )

        manager = Manager()
        output_queue = manager.Queue(self.output_queue_size)
        input_queue = manager.Queue(self.output_queue_size * self.batch_size)

        # Start process that populates the queue.
        self.queuer = Process(target=_queuer,
                              args=(instances, input_queue, self.num_workers,
                                    num_epochs))
        self.queuer.start()

        # Start the tensor-dict workers.
        for i in range(self.num_workers):
            args = (input_queue, output_queue, self.iterator, shuffle, i)
            process = Process(target=_create_tensor_dicts, args=args)
            process.start()
            self.processes.append(process)

        num_finished = 0
        while num_finished < self.num_workers:
            item = output_queue.get()
            if isinstance(item, int):
                num_finished += 1
                logger.info(
                    f"worker {item} finished ({num_finished} / {self.num_workers})"
                )
            else:
                yield item

        for process in self.processes:
            process.join()
        self.processes.clear()

        if self.queuer is not None:
            self.queuer.join()
            self.queuer = None
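The consuming loop above encodes a simple protocol: each worker streams tensor dicts into output_queue and finally puts its own integer index. The sketch below shows a worker honoring that contract; it is not AllenNLP's actual _create_tensor_dicts, and the assumption that _queuer ends the stream with one None per worker is inferred, not confirmed.

# Sketch of the worker protocol implied by the loop above, not AllenNLP's real
# _create_tensor_dicts. Assumption: _queuer streams instances and finishes with
# one None sentinel per worker (num_workers is passed to it above).
def _tensor_dict_worker(input_queue, output_queue, iterator, shuffle, index):
    instances = []
    while True:
        instance = input_queue.get()
        if instance is None:
            break
        instances.append(instance)
    for tensor_dict in iterator(instances, num_epochs=1, shuffle=shuffle):
        output_queue.put(tensor_dict)
    output_queue.put(index)  # a bare int tells the consumer this worker is done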
Code Example #5
File: aliVOS.py Project: yutliu/betterSAT
 def run_tracker(self):
     """
     Run self.pipeline on Ali vos
     """
     num_gpu = self._hyper_params["device_num"]
     all_devs = [torch.device("cuda:%d" % i) for i in range(num_gpu)]
     logger.info('running test on devices {}'.format(all_devs))
     ALI_root = self._hyper_params["data_root"]
     logger.info('Using dataset %s at: %s' % (self.dataset_name, ALI_root))
     # setup dataset
     dataset = davis_benchmark.load_dataset(
         ALI_root, self.dataset_name
     )  # OrderedDict:30 bike-packing, blackswan, bmx-trees...
     self.dataset = dataset
     keys = list(dataset.keys())  # list:30
     keys.sort()
     nr_records = len(keys)
     pbar = tqdm(total=nr_records)
     mean_speed = -1
     speed_list = []
     manager = Manager()
     speed_queue = manager.Queue(500)
     # set worker
     if num_gpu == 0:
         self.worker(keys, all_devs[0], self.dataset, speed_queue)
         for i in range(nr_records):
             s = speed_queue.get()
             speed_list.append(s)
             pbar.update(1)
     else:
         nr_video = math.ceil(nr_records / num_gpu)  # number of test videos assigned to each GPU
         procs = []
         for i in range(num_gpu):
             start = i * nr_video  # 0
             end = min(start + nr_video, nr_records)  # e.g. with 2 GPUs: 30 / 2 = 15 videos each
             split_records = keys[start:end]
             proc = mp.Process(target=self.worker,
                               args=(split_records, all_devs[i],
                                     self.dataset, speed_queue))
             logger.info('process:%d, start:%d, end:%d' % (i, start, end))
             proc.start()
             procs.append(proc)
         for i in range(nr_records):
             s = speed_queue.get()
             speed_list.append(s)
             pbar.update(1)
         for p in procs:
             p.join()
     # print result
     mean_speed = float(np.mean(speed_list))
     logger.info('Mean Speed: {:.2f} FPS'.format(mean_speed))
     self._state['speed'] = mean_speed
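The collection loop above expects exactly one speed value per test video on speed_queue, however the records are split across devices. A minimal sketch of a worker method honoring that contract (the actual tracking code is omitted):

    # Sketch of the contract assumed by the loop above: one speed value per record
    # goes onto speed_queue; the real per-video tracking/evaluation is omitted.
    def worker(self, records, device, dataset, speed_queue):
        for video_name in records:
            # ... run self.pipeline on dataset[video_name] using `device` ...
            frames_per_second = 30.0  # placeholder for the measured speed
            speed_queue.put(frames_per_second)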
Code Example #6
    def _instances(self, file_path: str, manager: Manager, output_queue: Queue) -> Iterator[Instance]:
        """
        A generator that reads instances off the output queue and yields them up
        until none are left (signified by all ``num_workers`` workers putting their
        ids into the queue).
        """
        shards = list(CORPORA[file_path](file_path))
        # Ensure a consistent order before shuffling for testing.
        # shards.sort()
        num_shards = len(shards)

        # If we want multiple epochs per read, put shards in the queue multiple times.
        input_queue = manager.Queue(
            num_shards * self.epochs_per_read + self.num_workers)
        for _ in range(self.epochs_per_read):
            np.random.shuffle(shards)
            for shard in shards:
                input_queue.put(shard)

        # Then put a None per worker to signify no more files.
        for _ in range(self.num_workers):
            input_queue.put(None)

        processes: List[Process] = []
        num_finished = 0

        for worker_id in range(self.num_workers):
            process = Process(target=_worker,
                              args=(self.reader, input_queue, output_queue, worker_id))
            logger.info(f"starting worker {worker_id}")
            process.start()
            processes.append(process)

        # Keep going as long as not all the workers have finished.
        while num_finished < self.num_workers:
            item = output_queue.get()
            if item is None:
                continue
            elif isinstance(item, int):
                # Means a worker has finished, so increment the finished count.
                num_finished += 1
                logger.info(
                    f"worker {item} finished ({num_finished}/{self.num_workers})")
            else:
                # Otherwise it's an ``Instance``, so yield it up.
                yield item

        for process in processes:
            process.join()
        processes.clear()
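The docstring and comments above already describe the worker contract: consume shard paths until a None sentinel, emit instances, then emit the worker id. A minimal sketch of a _worker satisfying it (the project's actual implementation may differ in details):

# Minimal sketch of the _worker contract described above; the real implementation
# may differ in details.
def _worker(reader, input_queue, output_queue, worker_id):
    while True:
        shard = input_queue.get()
        if shard is None:
            break  # one None per worker was enqueued by _instances
        for instance in reader.read(shard):
            output_queue.put(instance)
    output_queue.put(worker_id)  # the bare id tells _instances this worker is done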
Code Example #7
        class QIterable(Iterable[Instance]):
            """
            You can't set attributes on Iterators, so this is just a dumb wrapper
            that exposes the output_queue. Currently you probably shouldn't touch
            the output queue, but this is done with an eye toward implementing
            a data iterator that can read directly from the queue (instead of having
            to use the _instances iterator we define here.)
            """
            def __init__(self) -> None:
                self.manager = Manager()
                self.output_queue = self.manager.Queue(outer_self.output_queue_size)
                self.num_workers = outer_self.num_workers

            def __iter__(self) -> Iterator[Instance]:
                # pylint: disable=protected-access
                return outer_self._instances(file_path, self.manager, self.output_queue)
Code Example #8
File: supervised.py Project: michelgokan/AlphaTSP
def run(args):

    # setup
    N, D = args.N, args.D
    n_examples = args.n_train_examples
    n_threads = args.n_threads
    n_examples_per_thread = n_examples // n_threads

    # create policy network
    policy_network = alphatsp.util.get_policy_network(args.policy_network)

    # generate examples
    print("Generating examples and training...")

    manager = Manager()
    train_queue = manager.Queue()
    shared_dict = manager.dict()

    shared_dict["success"] = False

    producers = []
    for _ in range(n_threads):
        producers.append(
            Process(target=generate_examples,
                    args=(n_examples_per_thread, train_queue, args)))

    for p in producers:
        p.start()

    c = Process(target=train,
                args=(policy_network, train_queue, shared_dict, args))
    c.start()

    for p in producers:
        p.join()
    train_queue.put(None)

    c.join()

    status = shared_dict["success"]
    if not status:
        print("Experiment failed.")
        return -1
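run() only works if generate_examples and train agree on the queue protocol: producers push examples, run() pushes a final None after they join, and the trainer stops on that sentinel and reports back through shared_dict. Hedged sketches of both roles follow; make_training_example and the optimization step are placeholders, not code from the project.

# Hedged sketches of the two roles wired together by run(); only the queue and
# sentinel contract is shown, and make_training_example is a hypothetical helper.
def generate_examples(n_examples, train_queue, args):
    for _ in range(n_examples):
        example = make_training_example(args)  # hypothetical helper
        train_queue.put(example)

def train(policy_network, train_queue, shared_dict, args):
    while True:
        example = train_queue.get()
        if example is None:          # run() enqueues None once all producers joined
            break
        # ... one optimization step on policy_network with `example` ...
    shared_dict["success"] = True    # read back by run() as `status`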
Code Example #9
    def start_multiprocessing(self, embeddings, naming_list, naming_dict, dataset):
        n_threads = 4
        chunked = chunkIt(naming_list, n_threads)
        # multiprocess the grid search and run a separate process for the progress bar.
        pool1 = Pool(processes=n_threads)
        m = Manager()
        q = m.Queue()
        p = Process(target=progressBar, args=(len(naming_list), q,))
        p.start()

        results = pool1.starmap(self.determine_triplets,
                                zip(n_threads * [q],
                                    chunked,
                                    n_threads * [naming_list],
                                    n_threads * [embeddings],
                                    n_threads * [naming_dict],
                                    n_threads * [dataset]))
        final_results = []
        for r in results:
            final_results += r

        p.join()
        pool1.close()
        return final_results
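The pool workers and the progress process only share the manager queue q, so a plausible reading is that each determine_triplets call puts one token on the queue per processed name and progressBar counts them up to len(naming_list). A sketch under that assumption; the project's real progressBar may differ.

# Sketch under the assumption that the workers put one token on `q` per processed
# name; shown only to illustrate why the queue is passed to both sides.
def progressBar(total, q):
    done = 0
    while done < total:
        q.get()  # block until a worker reports another finished item
        done += 1
        print("\rprogress: {}/{}".format(done, total), end="", flush=True)
    print()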
Code Example #10
File: app.py Project: PeacefulCoder01/hac-tech-2019
class App:
    def __init__(self, args):
        self.args = args
        ctx = get_context('spawn')
        self.manager = Manager()
        self.smart_value = self.manager.Value('i', 0)
        self.connection = Pipe()
        self.queue = self.manager.Queue()
        # self.stream_process = ctx.Process(target=client, args=(self.args, None))
        # self.detection_process = ctx.Process(target=server, args=(None, None))
        self.input_thread = threading.Thread(name='input_thread',
                                             target=self.collect_rowdata)
        self.detection_thread = threading.Thread(name='detection_thread',
                                                 target=self.detection_loop)

    def run(self):
        # self.stream_process.start()
        self.detection_thread.start()
        self.input_thread.start()

    def collect_rowdata(self):
        print("start client...")
        print("args", self.args)
        host = "127.0.0.1"  # set to IP address of target computer
        port = 13000
        addr = (host, port)
        UDPSock = socket.socket(AF_INET, SOCK_DGRAM)
        data = str.encode("client connected")
        while True:
            data = str.encode(
                input("\nEnter message to send or type 'exit': "))
            if data.decode("utf-8") == "exit":
                break
            UDPSock.sendto(data, addr)
        UDPSock.close()
        os._exit(0)

    def detection_loop(self):
        print("start server...")
        host = ""
        port = 13000
        buf = 1024
        addr = (host, port)
        UDPSock = socket.socket(AF_INET, SOCK_DGRAM)
        UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        UDPSock.bind(addr)
        print("\nWaiting to receive messages...")

        while True:
            (data, addr) = UDPSock.recvfrom(buf)
            print("\nReceived message: " + data.decode("utf-8"))
            if data == "exit":
                break

        UDPSock.close()
        os._exit(0)

    def __pred(self, model_output):
        return torch.argmax(model_output, dim=1)

    def __init_model(self):
        args = self.args
        model = DetectorMultiLSTM(input_size=args["Model"]["input_size"],
                                  hidden_size=args["Model"]["hidden_size"],
                                  target_size=args['Model']['num_classes'])
        model.eval()
        return model