Example #1
import multiprocessing
import time


def feeder(dictionary: list, queue: multiprocessing.JoinableQueue):
    """
    Feed words from a dictionary into a shared queue.
    :param dictionary: iterable of words to enqueue
    :param queue: queue shared with the consumer process(es)
    :return: None
    """

    print("Feeding words into the pipeline")
    count = 0
    for word in dictionary:
        count += 1
        while queue.full():
            # print("Sleeping .0005 seconds to allow queue to empty.")
            time.sleep(.0005)

        queue.put(word)
        if count % 100000 == 0:
            print(f"Placed the {count}th word into the queue.")
Example #2
class RelayAgent(object):
    """Dummy relay agent.

    This agent subscribe to the '/decision_result' topic from environment
    and put the received data into a queue. Then it retrieves and replay
    the queued data when the its `step()` method is callled.
    """
    def __init__(self, queue_len):
        self.q = Queue(queue_len)
        rospy.Subscriber('/decision_result', Int16, self.__mover)

    def __mover(self, data):
        try:
            key = data.data
            # print "[__mover]: {}".format(key)
            if self.q.full():
                self.q.get_nowait()
                self.q.task_done()
            self.q.put(key, timeout=0.1)
        except Exception as e:
            print "[__mover]: action enque failed. {}".format(e.message)
        return

    def step(self, *args, **kwargs):
        while True:
            try:
                action = self.q.get(timeout=0.1)
                self.q.task_done()
                break
            except Exception:
                print("[step]: get action failed")
                time.sleep(0.1)
        # print "[step()]: action = {}".format(action)
        return action, {}

    def set_session(self, *args):
        return
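The drop-oldest behavior in `__mover` can be seen in isolation with the standard-library queue module; this small sketch is illustrative only and is not tied to ROS.

import queue

q = queue.Queue(maxsize=3)
for key in range(6):
    if q.full():
        q.get_nowait()       # discard the oldest entry to make room
        q.task_done()
    q.put(key, timeout=0.1)

print(list(q.queue))          # the three most recent keys: [3, 4, 5]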
Example #3
class CAN:
    def __init__(self):
        # Distance Buffer
        self.distance_buffer = JoinableQueue(100)
        self.last_received_range_frame = RangeCANFrame(0, 0)

        # RFID Buffer
        self.RFID_buffer = JoinableQueue(100)

        # Bluetooth Remote Control Command Buffer
        self.btrc_buffer = JoinableQueue(100)

        # WiFi Control Center Message Buffer
        self.wfcc_buffer = JoinableQueue(100)

        # WiFi Train State Message Buffer
        self.wfts_buffer = JoinableQueue(100)

    def update_distance_buffer(self, distance_to_obstacle, timestamp):
        if self.distance_buffer.full():
            self.distance_buffer.get()
        self.distance_buffer.put(RangeCANFrame(distance_to_obstacle,
                                               timestamp))

    def update_RFID_buffer(self, RFID, timestamp):
        if self.RFID_buffer.full():
            self.RFID_buffer.get()
        self.RFID_buffer.put(RFIDCANFrame(RFID, timestamp))

    def update_btrc_buffer(self, btrc_command, timestamp):
        if self.btrc_buffer.full():
            self.btrc_buffer.get()
        self.btrc_buffer.put(
            BluetoothRemoteControlCANFrame(btrc_command, timestamp))

    def update_wfcc_buffer(self, wfcc_message, timestamp):
        if self.wfcc_buffer.full():
            self.wfcc_buffer.get()
        self.wfcc_buffer.put(WiFiControlCenterCANFrame(wfcc_message,
                                                       timestamp))

    def update_wfts_buffer(self, most_recent_position, mode, state, decision,
                           timestamp):
        if self.wfts_buffer.full():
            self.wfts_buffer.get()
        self.wfts_buffer.put(
            WiFiTrainStateCANFrame(most_recent_position, mode, state, decision,
                                   timestamp))

    def get_range_frame(self):
        if not self.distance_buffer.empty():
            self.last_received_range_frame = self.distance_buffer.get_nowait()
        return self.last_received_range_frame

    def get_RFID_frame(self):
        if not self.RFID_buffer.empty():
            return self.RFID_buffer.get_nowait()
        else:
            return RFIDCANFrame("NaRFID", 0)

    def get_btrc_frame(self):
        if not self.btrc_buffer.empty():
            return self.btrc_buffer.get_nowait()
        else:
            return BluetoothRemoteControlCANFrame("None", 0)

    def get_wfcc_frame(self):
        if not self.wfcc_buffer.empty():
            wfcc_frame = self.wfcc_buffer.get()
            return wfcc_frame
        else:
            return WiFiControlCenterCANFrame("None", 0)

    def get_wfts_frame(self):
        if not self.wfts_buffer.empty():
            wfts_frame = self.wfts_buffer.get()
            return wfts_frame
        else:
            return WiFiTrainStateCANFrame(-1, "UNKNOWN", "UNKNOWN", "NO", 0)
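Each update method above repeats the same bounded-buffer pattern: drop the oldest frame when the buffer is full, then enqueue the new one. A hypothetical helper (not part of the original class) could factor that out:

from multiprocessing import JoinableQueue

def put_latest(buffer: JoinableQueue, frame) -> None:
    """Keep the buffer bounded by discarding the oldest frame when full."""
    if buffer.full():
        buffer.get()       # note: full()/get() are only approximate across processes
    buffer.put(frame)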
Example #4
 pair_buffer = {}
 scaffold_count = {}
 #     while (not inq.empty()) or sum( [reader.is_alive() for reader in readers] )>0:
 while True:
     if args.debug: print("get")
     try:
         procid, scaffold, pairs = inq.get()
         #               procid,scaffold,pairs = inq.get(True,10)
         #print("#got data:",procid,scaffold,len(pairs))
         print("#got data from inq:",
               procid,
               scaffold,
               len(pairs),
               inq.empty(),
               inq.qsize(),
               inq.full(),
               strftime("%Y-%m-%d %H:%M:%S"),
               sum([reader.is_alive() for reader in readers]),
               "q.size():",
               q.qsize(),
               file=sys.stderr,
               sep="\t")
         sys.stderr.flush()
         sys.stdout.flush()
     except Exception as e:
         print(e, file=sys.stderr)
         if args.top:
             print("queue get timed out",
                   [reader.cpu_percent() for reader in reader_procs],
                   [worker.cpu_percent() for worker in worker_procs])
         #print("#timed out",inq.empty())
Example #5
class AnalysisManager():
    """Manage all analysis' process."""

    def __init__(self):
        # Processing pool.
        logger.debug("Using pool on %i core" % self.get_parallelism())
        # Load modules.
        self.modules = []
        self.load_modules()
        self.check_module_deps()
        # Starting worker pool.
        self.workers = []
        self.tasks = JoinableQueue(self.get_parallelism())
        self.workers_start()

    def workers_start(self):
        """Start workers pool."""
        for _ in range(self.get_parallelism()):
            runner = AnalysisRunner(self.tasks, self.modules)
            runner.start()
            self.workers.append(runner)

    def workers_stop(self):
        """Stop workers pool."""
        # Wait for all workers to finish.
        for worker in self.workers:
            worker.join()

    def get_parallelism(self):
        """Get the ghiro parallelism level for analysis processing."""
        # Check database type. If we detect SQLite we slow down processing to
        # only one process. SQLite does not support parallelism.
        if settings.DATABASES["default"]["ENGINE"].endswith("sqlite3"):
            logger.warning("Detected SQLite database, decreased parallelism to 1. SQLite doesn't support parallelism.")
            return 1
        elif cpu_count() > 1:
            # Use the total CPU count minus one, leaving one core for the DB and other uses.
            return cpu_count() - 1
        else:
            return 1

    def load_modules(self):
        """Load modules."""
        # Search for analysis modules; the module directory needs to be importable as a package named "modules".
        for loader_instance, module_name, is_pkg in pkgutil.iter_modules(modules.__path__, modules.__name__ + "."):
            # Skip packages.
            if is_pkg:
                continue
            # Load module.
            # NOTE: This code is inspired by the Cuckoo Sandbox module loading system.
            try:
                module = __import__(module_name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                logger.error("Unable to import module: %s" % module)
            else:
                for class_name, class_pkg in inspect.getmembers(module):
                    if inspect.isclass(class_pkg):
                        # Load only classes that inherit from BaseProcessingModule.
                        if issubclass(class_pkg, BaseProcessingModule) and class_pkg is not BaseProcessingModule:
                            self.modules.append(class_pkg)
                            logger.debug("Found module: %s" % class_name)

        # Sort modules by execution order.
        self.modules.sort(key=lambda x: x.order)

    def check_module_deps(self):
        """Check modules for requested deps, if not found removes the module from the list."""
        for plugin in self.modules:
            # NOTE: create the module class instance.
            if not plugin().check_deps():
                self.modules.remove(plugin)
                logger.warning("Kicked module, requirements not found: %s" % plugin.__name__)

    def run(self):
        """Start all analyses."""
        # Clean up tasks remaining stale from old runs.
        if Analysis.objects.filter(state="P").exists():
            logger.info("Found %i stale analysis, putting them in queue." % Analysis.objects.filter(state="P").count())
            Analysis.objects.filter(state="P").update(state="W")

        # Infinite loop.
        try:
            while True:
                # Fetch tasks waiting processing.
                tasks = Analysis.objects.filter(state="W").order_by("id")

                if tasks.exists() and not self.tasks.full():
                    # Using iterator() to avoid caching.
                    for task in Analysis.objects.filter(state="W").order_by("id").iterator():
                        self.tasks.put(task)
                        logger.debug("Processing task %s" % task.id)
                        task.state = "P"
                        task.save()
                elif self.tasks.full():
                    logger.debug("Queue full. Waiting...")
                    sleep(1)
                else:
                    logger.debug("No tasks. Waiting...")
                    sleep(1)
        except KeyboardInterrupt:
            print("Exiting... (requested by user)")
        finally:
            print("Waiting tasks to accomplish...")
            self.workers_stop()
            print("Processing done. Have a nice day in the real world.")
Example #6
class ImagesBatcher(AbstractDataBatcher):
    def __init__(
            self,
            queue_size,
            batch_size,
            data_sampler,
            image_processor=None,
            audio_processor=None,
            single_epoch=False,
            cache_data=False,  # TODO: implement me!
            disk_reader_process_num=1):
        """
        Class for creating sequence of data batches for training or validation.
        :param queue_size: queue size for Batch readers
        :param batch_size: size of batches generated
        :param dataset_parser: dataset structure-related parser with all images and labels
        :param image_processor: image reading and preprocessing routine
        :param data_sampler: knows how to sample batches from dataset
        :param single_epoch: if enabled, image batcher finish one epoch with None batch
        :param cache_data: do we need to store all data in batcher memory?
        :param disk_reader_process_num: how many disk readers do we need?
        """
        super(AbstractDataBatcher, self).__init__()

        # set parameters
        self.batch_size = batch_size
        self.epoch_is_finished = False
        self.batch_queue_balance = 0
        if single_epoch:
            self.sampler_external_info = type('sampler_external_info',
                                              (object, ),
                                              dict(single_epoch=True))
        else:
            self.sampler_external_info = None

        # parse given dataset and init data sampler
        self.data_sampler = data_sampler

        # set queues
        if queue_size == -1:
            queue_size = self.data_sampler.dataset_size() // self.batch_size + 1
        self.task_queue = JoinableQueue(queue_size)
        self.batch_queue = JoinableQueue(queue_size)

        # init batch disk readers (they are started later in start())
        self.data_readers = []
        print('disk_reader_process_num:', disk_reader_process_num)
        for i in range(disk_reader_process_num):
            self.data_readers.append(
                (BatchDiskReader(self.task_queue, self.batch_queue,
                                 image_processor, audio_processor)))

    def start(self):
        self.epoch_is_finished = False

        # start batch disk readers
        for reader in self.data_readers:
            reader.start()

        # fill task queue with batches to start async reading from disk
        self.fill_task_queue()

    def fill_task_queue(self):
        try:
            while True:
                if not self.task_queue.full():
                    batch = self.data_sampler.sampling(
                        self.batch_size, self.sampler_external_info)
                    if batch is not None:
                        self.task_queue.put_nowait(batch)
                        self.batch_queue_balance += 1
                    else:
                        self.epoch_is_finished = True
                        break
                else:
                    break
        except Exception as e:  #Queue.Full:
            logger.error("ImagesBatcher: ", e)

    def next_batch(self):
        """
        Returns next batch from data
        """
        if self.epoch_is_finished and self.batch_queue_balance == 0:
            self.epoch_is_finished = False
            self.fill_task_queue()
            return None

        batch = self.batch_queue.get(block=True)
        self.batch_queue.task_done()
        self.batch_queue_balance -= 1
        if not self.epoch_is_finished:
            # fill task queue
            self.fill_task_queue()
        return batch

    def update_sampler(self, target, logits, step, summary_writer):
        if hasattr(self.data_sampler, 'update'):
            labels = target.cpu().data.numpy()
            is_update_sampler = self.data_sampler.update(
                labels, logits, step, summary_writer)
        #if is_update_sampler:
        #    self.clear_queue()

    def clear_queue(self):
        try:
            while True:
                self.task_queue.get_nowait()
                self.task_queue.task_done()
        except Exception as e:
            pass
        try:
            while True:
                self.batch_queue.get_nowait()
                self.batch_queue.task_done()
        except Exception as e:
            pass
        self.fill_task_queue()

    def finish(self):
        for data_reader in self.data_readers:
            data_reader.deactivate()

        while not self.task_queue.empty():
            self.task_queue.get()
            self.task_queue.task_done()

        is_anybody_alive = [
            data_reader.is_alive() for data_reader in self.data_readers
        ].count(True) > 0
        while not self.batch_queue.empty() or is_anybody_alive:
            try:
                self.batch_queue.get(timeout=1)
                self.batch_queue.task_done()
                is_anybody_alive = [
                    data_reader.is_alive() for data_reader in self.data_readers
                ].count(True) > 0
            except Exception as e:
                pass

        self.task_queue.join()
        self.batch_queue.join()
        for data_reader in self.data_readers:
            data_reader.join()
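The batcher above relies on a two-queue pipeline: a task queue of batch descriptions and a batch queue of loaded results, drained by reader processes. A self-contained sketch of that pattern, with illustrative names only:

from multiprocessing import JoinableQueue, Process

def disk_reader(task_queue, batch_queue):
    while True:
        task = task_queue.get()
        if task is None:                            # sentinel: stop reading
            task_queue.task_done()
            break
        batch_queue.put([x * 2 for x in task])      # stand-in for loading images
        task_queue.task_done()

if __name__ == "__main__":
    task_queue, batch_queue = JoinableQueue(8), JoinableQueue(8)
    readers = [Process(target=disk_reader, args=(task_queue, batch_queue))
               for _ in range(2)]
    for r in readers:
        r.start()
    for task in ([1, 2], [3, 4], [5, 6]):
        task_queue.put(task)
    for _ in readers:
        task_queue.put(None)
    task_queue.join()                               # all tasks and sentinels consumed
    for _ in range(3):
        print(batch_queue.get())
    for r in readers:
        r.join()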
Example #7
class AnalysisManager():
    """Manage all analysis' process."""

    def __init__(self):
        # Processing pool.
        logger.debug("Using pool on %i core" % self.get_parallelism())
        # Load modules.
        self.modules = []
        self.load_modules()
        self.check_module_deps()
        # Starting worker pool.
        self.workers = []
        self.tasks = JoinableQueue(self.get_parallelism())

    def workers_start(self):
        """Start workers pool."""
        for _ in range(self.get_parallelism()):
            runner = AnalysisRunner(self.tasks, self.modules)
            runner.start()
            self.workers.append(runner)

    def workers_stop(self):
        """Stop workers pool."""
        # Wait for all workers to finish.
        for worker in self.workers:
            worker.join()

    def get_parallelism(self):
        """Get the ghiro parallelism level for analysis processing."""
        # Check database type. If we detect SQLite we slow down processing to
        # only one process. SQLite does not support parallelism.
        if settings.DATABASES["default"]["ENGINE"].endswith("sqlite3"):
            logger.warning("Detected SQLite database, decreased parallelism to 1. SQLite doesn't support parallelism.")
            return 1
        elif cpu_count() > 1:
            # Use the total CPU count minus one, leaving one core for the DB and other uses.
            return cpu_count() - 1
        else:
            return 1

    def load_modules(self):
        """Load modules."""
        # Search for analysis modules; the module directory needs to be importable as a package named "modules".
        for loader_instance, module_name, is_pkg in pkgutil.iter_modules(modules.__path__, modules.__name__ + "."):
            # Skip packages.
            if is_pkg:
                continue
            # Load module.
            # NOTE: This code is inspired by the Cuckoo Sandbox module loading system.
            try:
                module = __import__(module_name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                logger.error("Unable to import module: %s" % module)
            else:
                for class_name, class_pkg in inspect.getmembers(module):
                    if inspect.isclass(class_pkg):
                        # Load only classes that inherit from BaseProcessingModule.
                        if issubclass(class_pkg, BaseProcessingModule) and class_pkg is not BaseProcessingModule:
                            self.modules.append(class_pkg)
                            logger.debug("Found module: %s" % class_name)

        # Sort modules by execution order.
        self.modules.sort(key=lambda x: x.order)

    def check_module_deps(self):
        """Check modules for requested deps, if not found removes the module from the list."""
        for plugin in self.modules:
            # NOTE: create the module class instance.
            if not plugin().check_deps():
                self.modules.remove(plugin)
                logger.warning("Kicked module, requirements not found: %s" % plugin.__name__)

    def run(self):
        """Start all analyses."""
        # Start workers.
        self.workers_start()

        # Clean up tasks remaining stale from old runs.
        if Analysis.objects.filter(state="P").exists():
            logger.info("Found %i stale analysis, putting them in queue." % Analysis.objects.filter(state="P").count())
            Analysis.objects.filter(state="P").update(state="W")

        # Infinite loop.
        try:
            while True:
                # Fetch tasks waiting processing.
                tasks = Analysis.objects.filter(state="W").order_by("id")

                if tasks.exists() and not self.tasks.full():
                    # Using iterator() to avoid caching.
                    for task in Analysis.objects.filter(state="W").order_by("id").iterator():
                        self.tasks.put(task)
                        logger.debug("Processing task %s" % task.id)
                        task.state = "P"
                        task.save()
                elif self.tasks.full():
                    logger.debug("Queue full. Waiting...")
                    sleep(1)
                else:
                    logger.debug("No tasks. Waiting...")
                    sleep(1)
        except KeyboardInterrupt:
            print("Exiting... (requested by user)")
        finally:
            print("Waiting tasks to accomplish...")
            self.workers_stop()
            print("Processing done. Have a nice day in the real world.")

    def stop(self):
        """Stops the analysis manager."""
        if self.workers:
            self.workers_stop()
Example #8
class Multiprocess_Data(object):
    def __init__(self,
                 data_path,
                 batch_size,
                 capacity,
                 num_threads=4,
                 all_samples=None):
        self.jobs = JoinableQueue(capacity)
        self.results = Queue(capacity)
        self.path = data_path
        self.threads = []
        self.num_threads = num_threads
        self.batch_size = batch_size
        self.all_samples = all_samples
        self.create_processes()

    # Read the data paths; this method can be overridden for your own data.
    def read_path(self, path):
        datas = np.loadtxt(path, str)
        if self.all_samples is None:
            self.all_samples = datas.shape[0]
            print('number of training samples: %d' % self.all_samples)
        return datas

    # Process one sample (e.g. rotation, resizing); override this method as needed.
    def process_data(self, each_data_path):
        image_path, maskee_path = each_data_path
        imge = misc.imread(image_path)
        masker = misc.imread(maskee_path)
        img_re = misc.imresize(imge, (256, 256))
        masker_re = misc.imresize(masker, (256, 256))
        return (img_re, masker_re)

    def worker(self):
        while True:
            try:
                data_path = self.jobs.get()
                result = self.process_data(data_path)
                self.results.put(result)
            except Exception as err:
                report(err, True)
            finally:
                self.jobs.task_done()

    def add_jobs(self):
        temp = 0
        datas = self.read_path(self.path)
        while True:
            temp += 1
            indices = np.arange(len(datas))
            np.random.shuffle(indices)
            datas = datas[indices]
            print('temp: %d' % temp)

            if not self.jobs.full():
                for item in datas:
                    self.jobs.put(item)

    def create_processes(self):
        add_job_process = Process(target=self.add_jobs)
        add_job_process.daemon = True
        add_job_process.start()

        self.threads.append(add_job_process)
        for _ in range(self.num_threads):
            worker_process = Process(target=self.worker)
            worker_process.daemon = True
            worker_process.start()
            self.threads.append(worker_process)

        print('number of processes: %s' % len(self.threads))

    def shuffle_batch(self):
        image_batch = []
        masker_batch = []
        for _ in range(self.batch_size):
            image, masker = self.results.get()
            masker = np.expand_dims(masker, axis=2)
            image_batch.append(image)
            masker_batch.append(masker)
        return np.stack(image_batch), np.stack(masker_batch)

    def end_processers(self):
        for process in self.threads:
            process.terminate()
            process.join()
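A hypothetical way to drive Multiprocess_Data above; 'pairs.txt' is a placeholder for a file listing one image path and one mask path per line, and the step count is arbitrary.

loader = Multiprocess_Data(data_path='pairs.txt',   # placeholder path
                           batch_size=8,
                           capacity=64,
                           num_threads=4)
for step in range(1000):
    images, masks = loader.shuffle_batch()   # blocks until a full batch is ready
    # ... feed (images, masks) to the training step ...
loader.end_processers()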
Example #9
class QueuedMultiProcessor:
    STOP_SIGNAL = "STOP"
    QUEUE_SIZE = 1500
    SLEEP_IF_FULL = 0.01

    def __init__(
        self,
        stream,
        worker,
        writer,
        worker_setup=None,
        writer_setup=None,
        writer_teardown=None,
        logger=None,
        chunksize=1,
    ):

        self.worker = worker
        self.worker_initializer = worker_setup

        self.writer = writer
        self.writer_initializer = writer_setup
        self.writer_teardown = writer_teardown

        self.chunksize = chunksize

        self.stream = stream
        self.logger = logger

        self.processes = []
        self.name = "Main Process"

    def writer_wrapper(self):
        self.name = "py_WRITER"
        setproctitle.setproctitle(self.name)
        if self.writer_initializer:
            self.writer_initializer()

        while True:
            chunk = self.out_queue.get()
            if chunk == self.STOP_SIGNAL:
                break
            for item in chunk:
                self.writer(item)

        if self.writer_teardown:
            if self.logger:
                self.logger.info("Tearing down Writer")
            self.writer_teardown()

    def worker_wrapper(self, i):
        self.name = f"py_WORKER_{i}"
        setproctitle.setproctitle(self.name)
        if self.worker_initializer:
            self.worker_initializer()

        while True:
            cache = []
            chunk = self.in_queue.get()
            if chunk == self.STOP_SIGNAL:
                self.in_queue.task_done()
                break
            for item in chunk:
                result = self.worker(item)
                cache.append(result)
            self.out_queue.put(cache)
            self.in_queue.task_done()

    def kill_all_workers(self, *args):
        if self.logger:
            self.logger.critical("KILLING %s", self.name)
        os.kill(os.getpid(), signal.SIGKILL)

    def _run(self, n_workers):

        self.in_queue = JoinableQueue(maxsize=self.QUEUE_SIZE)
        self.out_queue = Queue(maxsize=self.QUEUE_SIZE)

        signal.signal(signal.SIGINT, self.kill_all_workers)
        self.write_process = Process(target=self.writer_wrapper)
        self.write_process.start()

        for i in range(1, n_workers + 1):
            proc = Process(target=self.worker_wrapper, args=(i, ))
            proc.start()
            self.processes.append(proc)

        cache = []
        for item in self.stream:
            cache.append(item)
            while self.in_queue.full():
                time.sleep(self.SLEEP_IF_FULL)
            if len(cache) >= self.chunksize:
                self.in_queue.put(cache)
                cache = []
        self.in_queue.put(cache)
        for _ in self.processes:
            self.in_queue.put(self.STOP_SIGNAL)

        if self.logger:
            self.logger.info("joining in_queue")
        self.in_queue.join()

        for worker in self.processes:
            worker.join()

        if self.logger:
            self.logger.info("killing processes")

        self.out_queue.put(self.STOP_SIGNAL)
        self.write_process.join()

    def run(self, n_workers=2):
        setproctitle.setproctitle("py_MAIN")
        self._run(n_workers)
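A hypothetical usage of QueuedMultiProcessor above: square numbers in worker processes and print them from the writer process. The functions and the work stream are placeholders; worker and writer should be module-level functions so they can be pickled.

def square(x):
    return x * x

def print_result(result):
    print(result)

if __name__ == "__main__":
    processor = QueuedMultiProcessor(
        stream=range(10_000),   # any iterable of work items
        worker=square,          # called on each item in a worker process
        writer=print_result,    # called on each result in the writer process
        chunksize=100,          # items per chunk placed on the queues
    )
    processor.run(n_workers=4)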
Example #10
class ProcessExecutor(Process, metaclass=ABCMeta):

    def __init__(self,
                 name,
                 shared_memory_handler,
                 CORE_POOL_SIZE: int,
                 MAX_POOL_SIZE: int,
                 KEEP_ALIVE_TIME: float,
                 RETRY_NUM: int,
                 worker_class,
                 TPS=-1):
        super().__init__()
        self.__stop_signal = False
        self.__pause_signal = False

        self.name = name

        self.shared_memory_handler = shared_memory_handler

        self.TPS = TPS

        self.RETRY_NUM = RETRY_NUM

        self.CORE_POOL_SIZE = CORE_POOL_SIZE
        self.MAX_POOL_SIZE = MAX_POOL_SIZE

        self.KEEP_ALIVE_TIME = KEEP_ALIVE_TIME

        self.job_queue = JoinableQueue(self.MAX_POOL_SIZE * 2)
        self.worker_class = worker_class

        self.workers = {}
        self.worker_states = {}
        self.__worker_count = 0
        self.__init_worker_states()

    def __init_worker_states(self):
        for i in range(self.MAX_POOL_SIZE):
            self.worker_states[str(i)] = const.WORKER_EMPTY

    def __get_first_empty_worker_id(self) -> int:
        for i in range(self.MAX_POOL_SIZE):
            if self.worker_states[str(i)] == const.WORKER_EMPTY:
                return i

    @property
    def pause_signal(self):
        return self.__pause_signal

    @property
    def stop_signal(self):
        return self.__stop_signal

    @property
    def worker_count(self):
        count = 0
        for k, v in self.worker_states.items():
            if v != const.WORKER_EMPTY:
                count += 1
        return count

    @stop_signal.setter
    def stop_signal(self, signal: bool):
        self.__stop_signal = signal

    @pause_signal.setter
    def pause_signal(self, signal: bool):
        self.__pause_signal = signal

    @abstractmethod
    def get_job(self) -> object:
        raise NotImplementedError

    @abstractmethod
    def create_worker(self, worker_class, is_core=True, init_job=None) -> object:
        raise NotImplementedError

    def update_worker_state(self, id_, state):
        if not self.worker_states.get(id_):
            return
        else:
            self.worker_states[id_] = state
            if state == const.WORKER_EMPTY:
                self.workers[id_] = None

    def __receiver_process(self):
        while not self.stop_signal:
            if self.pause_signal:
                time.sleep(1)
                continue

            job = self.get_job()
            # self.logger.info(self.worker_states)
            if self.worker_count < self.CORE_POOL_SIZE:
                core_worker = self.create_worker(self.worker_class, str(self.__get_first_empty_worker_id()), is_core=True, init_job=job)
                self.workers[str(core_worker.id)] = core_worker
                self.update_worker_state(core_worker.id, const.WORKER_INIT)
                core_worker.start()
            elif self.job_queue.full():
                if self.worker_count < self.MAX_POOL_SIZE:
                    non_core_worker = self.create_worker(self.worker_class, str(self.__get_first_empty_worker_id()), is_core=False, init_job=job)
                    self.workers[str(non_core_worker.id)] = non_core_worker
                    self.update_worker_state(non_core_worker.id, const.WORKER_INIT)
                    non_core_worker.start()
                else:
                    self.job_queue.put(job)
            else:
                self.job_queue.put(job)

    def __init_inner_log(self):
        MultiprocessLog.worker_configurer(self.shared_memory_handler.log_message_queue)
        self.logger = logging.getLogger(self.name)

    def ran(self):
        pass

    def __init_receiver(self):
        self.receiver = threading.Thread(target=self.__receiver_process)
        self.receiver.daemon = True
        self.receiver.start()

    def before_waiting(self):
        pass

    def __wait_for_thread(self):
        self.receiver.join()

    def run(self):
        self.__init_inner_log()
        self.ran()
        self.__init_receiver()
        self.before_waiting()
        self.__wait_for_thread()
Example #11
    consumers_events = [Event() for _ in range(CPU_COUNT)]
    consumers = [
        Process(target=utils.consume,
                args=(candidates, consumers_events[i],
                      (samples, otsu, TARGET, name), check_candidate))
        for i in range(CPU_COUNT)
    ]
    for p in consumers:
        p.daemon = True
        p.start()

    producers_events = [Event() for _ in range(CPU_COUNT)]
    producers = [
        Process(target=sample_get,
                args=(candidates, producers_events[i],
                      (size, FILE, MASK, (Xpdf, Ypdf))))
        for i in range(CPU_COUNT)
    ]
    for p in producers:
        p.daemon = True
        p.start()

    while not samples.full():
        if samples.qsize() > 0:
            print(samples.qsize())
        time.sleep(10)

    for event in producers_events:
        event.set()
    candidates.join()
    for event in consumers_events:
        event.set()
    time.sleep(1)
    end = datetime.now()
    print(end - start)
     histogram_merge_worker.start()

     if args.top:
          reader_procs = [ psutil.Process(reader.pid) for reader in readers ]
          worker_procs = [ psutil.Process(worker.pid) for worker in workers ]

     pair_buffer={}
     scaffold_count={}
#     while (not inq.empty()) or sum( [reader.is_alive() for reader in readers] )>0:
     while True:
          if args.debug: print("get")
          try:
               procid,scaffold,pairs = inq.get()
#               procid,scaffold,pairs = inq.get(True,10)
               #print("#got data:",procid,scaffold,len(pairs))
               print("#got data from inq:",procid,scaffold,len(pairs),inq.empty(),inq.qsize(),inq.full(),strftime("%Y-%m-%d %H:%M:%S"),sum( [reader.is_alive() for reader in readers] ),"q.size():",q.qsize(),file=sys.stderr,sep="\t")
               sys.stderr.flush()
               sys.stdout.flush()
          except Exception as e:
               print(e,file=sys.stderr)
               if args.top:
                    print("queue get timed out",
                          [reader.cpu_percent() for reader in reader_procs],
                          [worker.cpu_percent() for worker in worker_procs])
               #print("#timed out",inq.empty())
               print("#read from queue timed out:",
                     inq.empty(), inq.qsize(), inq.full(),
                     strftime("%Y-%m-%d %H:%M:%S"),
                     sum([reader.is_alive() for reader in readers]),
                     file=sys.stderr, sep="\t")
               sys.stderr.flush()
               continue
          if args.debug: print("got")
          if scaffold not in pair_buffer:
               pair_buffer[scaffold] = []
          pair_buffer[scaffold] += pairs
          scaffold_count[scaffold] = scaffold_count.get(scaffold, 0) + 1