Example #1
File: mem.py Project: fox0/CelestAI
from multiprocessing import Pool
from multiprocessing.managers import SharedMemoryManager


def main():
    smm = SharedMemoryManager()
    smm.start()
    ls = smm.ShareableList(range(2000))
    with Pool(4) as p:
        print(*list(p.imap_unordered(f, ls)), sep='\n')
        # print(p.map(f, [2, 3, 4, 5, 6]))  # lock
        # print(p.map(f, [2, 3, 4, 5, 6]))
    smm.shutdown()
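The worker `f` is not included in this excerpt of mem.py; any picklable one-argument function works with Pool.imap_unordered. A hypothetical stand-in, purely for illustration:

# Hypothetical stand-in for the project's `f` (not shown in this excerpt).
def f(x):
    return x * x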
Example #2
# Note: `BenchmarkParameters` is a namedtuple defined elsewhere in this benchmark code.
from subprocess import run, STDOUT
from multiprocessing.managers import SharedMemoryManager


def lif_feed_forward_benchmark(parameters: BenchmarkParameters):
    shared = SharedMemoryManager()
    shared.start()
    params = list(parameters._asdict().values())
    shared_list = shared.ShareableList(params)

    run(["python3", __file__, shared_list.shm.name], stderr=STDOUT)
    duration = shared_list[0]
    shared_list.shm.close()
    shared.shutdown()
    return duration
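The benchmark re-runs this same file as a subprocess, passing it the name of the ShareableList's shared memory block, and then reads the measured duration back out of slot 0. The child side of that hand-off is not shown in this excerpt; a minimal sketch of what it could look like:

# Hypothetical child-side sketch: attach to the parent's ShareableList by the
# block name passed on the command line and write the measured duration back
# into slot 0.
import sys
from multiprocessing.shared_memory import ShareableList

if __name__ == '__main__':
    shared_list = ShareableList(name=sys.argv[1])
    duration = 0.0  # placeholder for the actual timing measurement
    shared_list[0] = duration
    shared_list.shm.close()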
Example #3
    def multiprocess(self, n, nthread=8):
        """

        多进程 并发

        :return:
        """

        print('Parent process %s.' % os.getpid())

        p = Pool(nthread)  # process pool: request nthread worker processes from the system

        smm = SharedMemoryManager()  # TODO: requires Python 3.8+
        smm.start()  # Start the process that manages the shared memory blocks

        cache_list = smm.ShareableList([0] * n)
        # Values stored in a ShareableList are restricted to the built-in types
        # int, float, bool, str (< 10 MB per item), bytes (< 10 MB per item) and None.
        # Another notable difference from the built-in list type is that its length is
        # fixed (no append, insert, etc.) and slicing cannot be used to dynamically
        # create new ShareableList instances. (A short sketch of these constraints
        # follows this example.)

        shm_a = smm.SharedMemory(size=n)
        # shm_a.buf[:] = bytearray([0]*n)
        # shm_a.buf[:] = [0] * n

        print('shm_a id in main process: {} '.format(id(shm_a)))

        # Compare the memory space of the parent process with that of the child processes
        self.global_array = [0] * n
        print('array id in main process: {} '.format(id(self.global_array)))

        self.global_string = 'abc'
        print('string id in main process: {} '.format(id(self.global_string)))

        self.global_int = 10
        print('int id in main process: {} '.format(id(self.global_int)))

        for i in range(n):

            # p.apply_async(task, args=(cache_name,i)) # apply_async fetches results asynchronously
            p.apply_async(self.task, args=(cache_list, shm_a, i))

        print('Waiting for all subprocesses done...')
        p.close()

        p.join()

        print('All subprocesses done.')

        smm.shutdown()

        return cache_list, shm_a
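A short sketch of the ShareableList constraints described in the comments above (only a fixed set of built-in element types, fixed length, no append/insert):

# Small demonstration of the ShareableList constraints noted above.
from multiprocessing.shared_memory import ShareableList

sl = ShareableList([0, 1.5, True, 'text', b'bytes', None])  # only these built-in types
sl[0] = 42                 # in-place assignment of a compatible value is fine
try:
    sl.append(7)           # no append/insert: the length is fixed at creation
except AttributeError as exc:
    print(exc)
sl.shm.close()
sl.shm.unlink()            # created without a manager, so release it explicitly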
Example #4
def main():

    # This is the number of values that the writer will send to the reader
    items_to_send = random.randint(100000, 1000000)
    smm = SharedMemoryManager()
    smm.start()

    # Create a ShareableList to be used between the processes
    size = BUFFER_SIZE + 2
    shared_list = smm.ShareableList(range(size))
    for i in range(0, size):
        shared_list[i] = -1

    # TODO - Create any lock(s) or semaphore(s) that you feel you need
    lock = mp.Lock()
    sem = mp.Semaphore(BUFFER_SIZE)

    # TODO - create reader and writer processes
    items_sent = 0
    items_read = 0
    p1 = mp.Process(target=write, args=(shared_list, items_sent, items_to_send, sem, lock))
    p2 = mp.Process(target=read, args=(shared_list, items_read, items_to_send, sem, lock))

    # TODO - Start the processes and wait for them to finish
    p1.start()
    p2.start()

    p1.join()
    p2.join()

    print(f'{items_to_send} sent by the writer')

    # TODO - Display the number of numbers/items received by the reader.
    print(f'{shared_list[-1]} read by the reader')

    smm.shutdown()
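The `write` and `read` workers and the `BUFFER_SIZE` constant are defined elsewhere in this assignment file. One possible sketch of the two workers, consistent with how main() uses them: slots 0..BUFFER_SIZE-1 form a circular buffer whose empty slots hold -1, the single semaphore counts free slots, and the reader stores its item count in the last slot so that shared_list[-1] can be printed.

# Hedged sketch of the writer/reader pair used above; the real implementations
# are not shown in this excerpt.

def write(shared_list, items_sent, items_to_send, sem, lock):
    write_idx = 0
    for value in range(items_to_send):        # values are 0..N-1, never the -1 sentinel
        sem.acquire()                         # wait for a free slot
        with lock:
            shared_list[write_idx] = value
        write_idx = (write_idx + 1) % BUFFER_SIZE

def read(shared_list, items_read, items_to_send, sem, lock):
    read_idx = 0
    count = 0
    for _ in range(items_to_send):
        while shared_list[read_idx] == -1:    # spin until the writer fills this slot
            pass
        with lock:
            value = shared_list[read_idx]     # consume the value
            shared_list[read_idx] = -1        # mark the slot empty again
        sem.release()                         # one more free slot for the writer
        read_idx = (read_idx + 1) % BUFFER_SIZE
        count += 1
    shared_list[-1] = count                   # main() prints this after join()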
Example #5
def main():

    # This is the number of values that the writer will send to the reader
    items_to_send = random.randint(10000, 100000)

    smm = SharedMemoryManager()
    smm.start()

    # TODO - Create a ShareableList to be used between the processes

    # TODO - Create any lock(s) or semaphore(s) that you feel you need

    # TODO - create reader and writer processes

    # TODO - Start the processes and wait for them to finish

    print(f'{items_to_send} values sent')

    # TODO - Display the number of numbers/items received by the reader.
    #        Can not use "items_to_send", must be a value collected
    #        by the reader processes.
    # print(f'{<your variable>} values received')

    smm.shutdown()
Example #6
class ParallelSimulation(Simulation):
    """ Parallel simulation of Barnes-Hut algorithm realised using shared memory. """

    def __init__(self, positions: np.ndarray, velocities: np.ndarray, masses: np.ndarray, params: Namespace):
        super().__init__(positions, velocities, masses, params)
        self._theta = params.theta

        self._init_memory(positions, velocities, masses)
        self._init_workers()

        atexit.register(self._cleanup)

    def _init_memory(self, positions: np.ndarray, velocities: np.ndarray, masses: np.ndarray):
        """ Prepares shared memory arrays. """
        # setup process that sets up shared memory
        self._memory_manager = SharedMemoryManager()
        self._memory_manager.start()

        max_nodes = self.bodies + 64

        # create shared memory buffers
        self._positions_shm = self._memory_manager.SharedMemory(positions.nbytes)
        self._velocities_shm = self._memory_manager.SharedMemory(velocities.nbytes)
        self._accelerations_shm = self._memory_manager.SharedMemory(velocities.nbytes)
        self._masses_shm = self._memory_manager.SharedMemory(masses.nbytes)

        self._nodes_positions_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 3), float).nbytes)
        self._nodes_masses_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, ), float).nbytes)
        self._nodes_sizes_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, ), float).nbytes)
        self._nodes_children_types_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 8), int).nbytes)
        self._nodes_children_ids_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 8), int).nbytes)

        # setup NumPy arrays
        self._data = SharedData(
            time_step=self.time_step,
            theta=self._theta,
            gravitational_constant=self.gravitational_constant,
            softening=self.softening,

            nodes_count=Value('i', 0),

            positions=np.ndarray((self.bodies, 3), dtype=float, buffer=self._positions_shm.buf),
            velocities=np.ndarray((self.bodies, 3), dtype=float, buffer=self._velocities_shm.buf),
            accelerations=np.ndarray((self.bodies, 3), dtype=float, buffer=self._accelerations_shm.buf),
            masses=np.ndarray((self.bodies, ), dtype=float, buffer=self._masses_shm.buf),

            nodes_positions=np.ndarray((max_nodes, 3), dtype=float, buffer=self._nodes_positions_shm.buf),
            nodes_masses=np.ndarray((max_nodes, ), dtype=float, buffer=self._nodes_masses_shm.buf),
            nodes_sizes=np.ndarray((max_nodes, ), dtype=float, buffer=self._nodes_sizes_shm.buf),
            nodes_children_types=np.ndarray((max_nodes, 8), dtype=int, buffer=self._nodes_children_types_shm.buf),
            nodes_children_ids=np.ndarray((max_nodes, 8), dtype=int, buffer=self._nodes_children_ids_shm.buf)
        )

        # copy data into shared arrays
        self._data.positions[:] = positions[:]
        self._data.velocities[:] = velocities[:]
        self._data.masses[:] = masses[:]

    def _init_workers(self):
        """ Prepares pool of workers. """
        self._pool = Pool(
            processes=self._params.processes,
            initializer=worker.initialize,
            initargs=(self._data, )
        )

    def _cleanup(self):
        """ Cleans up shared memory and pool of workers. """
        self._pool.terminate()
        self._memory_manager.shutdown()
        print('Memory manager was shut down.')

    def simulate(self) -> Iterable[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
        """ Runs parallel implementation of Barnes-Hut simulation. """
        while True:
            self._build_octree()
            self._update_accelerations()
            self._update_positions()

            yield self._data.positions, self._data.velocities, self._data.accelerations

    def _build_octree(self):
        """ Builds octree used in Barnes-Hut. """
        global_coords_min = np.repeat(np.min(self._data.positions), 3)
        global_coords_max = np.repeat(np.max(self._data.positions), 3)
        global_coords_mid = (global_coords_min + global_coords_max) / 2

        # manually build first node
        self._data.nodes_count.value = 1
        self._data.nodes_positions[0] = np.average(self._data.positions, axis=0, weights=self._data.masses)
        self._data.nodes_masses[0] = np.sum(self._data.masses)
        self._data.nodes_sizes[0] = global_coords_max[0] - global_coords_min[0]

        # calculate base octant for each body
        bodies_base_octant = np.sum((self._data.positions > global_coords_mid) * [1, 2, 4], axis=1)

        tasks_targets = []
        tasks_args = []

        # build second layer of nodes and collect tasks
        for octant in range(8):
            coords_min, coords_max = octant_coords(global_coords_min, global_coords_max, octant)
            coords_mid = (coords_min + coords_max) / 2

            # get indices of bodies in this octant
            octant_bodies = np.argwhere(bodies_base_octant == octant).flatten()

            # if node is empty or has one body handle it separately
            if octant_bodies.size == 0:
                self._data.nodes_children_types[0, octant] = OCTANT_EMPTY
                continue
            if octant_bodies.size == 1:
                self._data.nodes_children_types[0, octant] = OCTANT_BODY
                self._data.nodes_children_ids[0, octant] = octant_bodies[0]
                continue

            # create node
            node_id = self._data.nodes_count.value
            self._data.nodes_count.value = node_id + 1
            self._data.nodes_children_types[0, octant] = OCTANT_NODE
            self._data.nodes_children_ids[0, octant] = node_id

            self._data.nodes_positions[node_id] = np.average(self._data.positions[octant_bodies], axis=0, weights=self._data.masses[octant_bodies])
            self._data.nodes_masses[node_id] = np.sum(self._data.masses[octant_bodies])
            self._data.nodes_sizes[node_id] = coords_max[0] - coords_min[0]

            # split bodies into sub octants
            bodies_sub_octant = np.sum((self._data.positions[octant_bodies] > coords_mid) * [1, 2, 4], axis=1)

            # create tasks
            for i in range(8):
                tasks_targets.append((node_id, i))
                tasks_args.append((
                    octant_bodies[bodies_sub_octant == i],
                    *octant_coords(coords_min, coords_max, i)
                ))

        # run tasks
        results = self._pool.starmap(worker.build_octree_branch, tasks_args)

        # update references in nodes
        for (node_id, i), (sub_node_type, sub_node_id) in zip(tasks_targets, results):
            self._data.nodes_children_types[node_id, i] = sub_node_type
            self._data.nodes_children_ids[node_id, i] = sub_node_id

    def _update_accelerations(self):
        """ Calculates accelerations of the bodies. """
        if self.bodies < 2:
            return

        self._pool.map(worker.update_acceleration, range(self.bodies))

    def _update_positions(self):
        """ Calculates positions of the bodies. """
        self._pool.map(worker.update_position, range(self.bodies))

    @property
    def positions(self) -> np.ndarray:
        return self._data.positions

    @property
    def velocities(self) -> np.ndarray:
        return self._data.velocities

    @property
    def masses(self) -> np.ndarray:
        return self._data.masses

    @property
    def accelerations(self) -> np.ndarray:
        return self._data.accelerations
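The `worker` module used by _init_workers() is not shown in this example. The usual shape of the pool-initializer pattern it relies on is to stash the shared data in a module-level global of the worker module; a hypothetical sketch:

# Hypothetical sketch of the worker module's initialization idiom. The real
# worker also implements build_octree_branch, update_acceleration and
# update_position; only the shared-data hand-off is shown here.

_data = None  # per-process reference to the SharedData namespace


def initialize(shared_data):
    # Runs once in each pool process. The NumPy arrays inside shared_data are
    # views over SharedMemory buffers, so every process reads and writes the
    # same underlying storage.
    global _data
    _data = shared_data


def update_position(body_id):
    # Illustrative task using the shared arrays; not the project's actual code.
    _data.positions[body_id] += _data.velocities[body_id] * _data.time_step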
Example #7
class DataManager:
    def __init__(self, obs=1000, config=None):
        self._datasets = dict()
        self.smm = SharedMemoryManager()
        self.smm.start()
        self.result = Manager().list()
        self.conns = dict()
        self.obs = obs
        self.config = config

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    def shutdown(self):
        self.smm.shutdown()
        for conn in self.conns.values():
            conn.close()

    def _add_to_shared_memory(self, nparray: recarray) -> SharedMemory:
        """Internal function to copy an array into shared memory.

        Parameters
        ----------
        nparray : recarray
            The array to be copied into shared memory.

        Returns
        -------
        SharedMemory
            The shared memory object.
        """
        shm = self.smm.SharedMemory(nparray.nbytes)
        array = recarray(shape=nparray.shape, dtype=nparray.dtype, buf=shm.buf)
        copyto(array, nparray)
        return shm

    def _download_dataset(self, dataset: Dataset) -> recarray:
        """Internal function to download the dataset.

        Parameters
        ----------
        dataset : Dataset
            Dataset information including source, library, table, vars, etc.

        Returns
        -------
        recarray
            `numpy.recarray` of the downloaded dataset.
        """
        # TODO: generic login data for different data sources
        if dataset.source == 'wrds':
            usr = self.config.get('wrds_username')
            pwd = self.config.get('wrds_password')
        # If there exists a connection for the data source, use it!
        if (conn := self.conns.get(dataset.source, None)) is None:
            module = import_module(f'frds.data.{dataset.source}')
            conn = module.Connection(usr=usr, pwd=pwd)
            self.conns.update({dataset.source: conn})
        df = conn.get_table(library=dataset.library,
                            table=dataset.table,
                            columns=dataset.vars,
                            date_cols=dataset.date_vars,
                            obs=self.obs)
        assert isinstance(df, DataFrame)
        return df.to_records(index=False)
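A consumer process can re-attach to an array placed in shared memory by _add_to_shared_memory, given the block's name together with the array's shape and dtype. A minimal sketch of that side (not part of this class):

# Sketch of the consumer side: re-attach to a recarray stored in shared memory,
# assuming the block name, shape and dtype are passed along with it.
from multiprocessing.shared_memory import SharedMemory
from numpy import recarray

def attach_recarray(shm_name, shape, dtype):
    shm = SharedMemory(name=shm_name)
    # Keep `shm` alive for as long as the returned view is in use.
    return recarray(shape=shape, dtype=dtype, buf=shm.buf), shm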
Example #8
class ParallelTransformStep(ProcessingStep[TPayload]):
    def __init__(
        self,
        function: Callable[[Message[TPayload]], TTransformed],
        next_step: ProcessingStep[TTransformed],
        processes: int,
        max_batch_size: int,
        max_batch_time: float,
        input_block_size: int,
        output_block_size: int,
        metrics: MetricsBackend,
    ) -> None:
        self.__transform_function = function
        self.__next_step = next_step
        self.__max_batch_size = max_batch_size
        self.__max_batch_time = max_batch_time

        self.__shared_memory_manager = SharedMemoryManager()
        self.__shared_memory_manager.start()

        self.__pool = Pool(
            processes,
            initializer=parallel_transform_worker_initializer,
            context=multiprocessing.get_context("spawn"),
        )

        self.__input_blocks = [
            self.__shared_memory_manager.SharedMemory(input_block_size)
            for _ in range(processes)
        ]

        self.__output_blocks = [
            self.__shared_memory_manager.SharedMemory(output_block_size)
            for _ in range(processes)
        ]

        self.__batch_builder: Optional[BatchBuilder[TPayload]] = None

        self.__results: Deque[
            Tuple[MessageBatch[TPayload],
                  AsyncResult[Tuple[int, MessageBatch[TTransformed]]]]
        ] = deque()

        self.__metrics = metrics
        self.__batches_in_progress = Gauge(metrics, "batches_in_progress")
        self.__pool_waiting_time: Optional[float] = None

        self.__closed = False

        def handle_sigchld(signum: int, frame: Any) -> None:
            # Terminates the consumer if any child process of the
            # consumer is terminated.
            # This is meant to detect the unexpected termination of
            # multiprocessing pool workers.
            if not self.__closed:
                self.__metrics.increment("sigchld.detected")
                raise ChildProcessTerminated()

        signal.signal(signal.SIGCHLD, handle_sigchld)

    def __submit_batch(self) -> None:
        assert self.__batch_builder is not None
        batch = self.__batch_builder.build()
        logger.debug("Submitting %r to %r...", batch, self.__pool)
        self.__results.append((
            batch,
            self.__pool.apply_async(
                parallel_transform_worker_apply,
                (self.__transform_function, batch, self.__output_blocks.pop()),
            ),
        ))
        self.__batches_in_progress.increment()
        self.__metrics.timing("batch.size.msg", len(batch))
        self.__metrics.timing("batch.size.bytes", batch.get_content_size())
        self.__batch_builder = None

    def __check_for_results(self, timeout: Optional[float] = None) -> None:
        input_batch, result = self.__results[0]

        # If this call is being made in a context where it is intended to be
        # nonblocking, checking if the result is ready (rather than trying to
        # retrieve the result itself) avoids costly synchronization.
        if timeout == 0 and not result.ready():
            # ``multiprocessing.TimeoutError`` (rather than builtin
            # ``TimeoutError``) maintains consistency with ``AsyncResult.get``.
            raise multiprocessing.TimeoutError()

        i, output_batch = result.get(timeout=timeout)

        # TODO: This does not handle rejections from the next step!
        for message in output_batch:
            self.__next_step.poll()
            self.__next_step.submit(message)

        if i != len(input_batch):
            logger.warning(
                "Received incomplete batch (%0.2f%% complete), resubmitting...",
                i / len(input_batch) * 100,
            )
            # TODO: This reserializes all the ``SerializedMessage`` data prior
            # to the processed index even though the values at those indices
            # will never be unpacked. It probably makes sense to remove that
            # data from the batch to avoid unnecessary serialization overhead.
            self.__results[0] = (
                input_batch,
                self.__pool.apply_async(
                    parallel_transform_worker_apply,
                    (
                        self.__transform_function,
                        input_batch,
                        output_batch.block,
                        i,
                    ),
                ),
            )
            return

        logger.debug("Completed %r, reclaiming blocks...", input_batch)
        self.__input_blocks.append(input_batch.block)
        self.__output_blocks.append(output_batch.block)
        self.__batches_in_progress.decrement()

        del self.__results[0]

    def poll(self) -> None:
        self.__next_step.poll()

        while self.__results:
            try:
                self.__check_for_results(timeout=0)
            except multiprocessing.TimeoutError:
                if self.__pool_waiting_time is None:
                    self.__pool_waiting_time = time.time()
                else:
                    current_time = time.time()
                    if current_time - self.__pool_waiting_time > LOG_THRESHOLD_TIME:
                        logger.warning(
                            "Waited on the process pool longer than %d seconds. Waiting for %d results. Pool: %r",
                            LOG_THRESHOLD_TIME,
                            len(self.__results),
                            self.__pool,
                        )
                        self.__pool_waiting_time = current_time
                break
            else:
                self.__pool_waiting_time = None

        if self.__batch_builder is not None and self.__batch_builder.ready():
            self.__submit_batch()

    def __reset_batch_builder(self) -> None:
        try:
            input_block = self.__input_blocks.pop()
        except IndexError as e:
            raise MessageRejected("no available input blocks") from e

        self.__batch_builder = BatchBuilder(
            MessageBatch(input_block),
            self.__max_batch_size,
            self.__max_batch_time,
        )

    def submit(self, message: Message[TPayload]) -> None:
        assert not self.__closed

        if self.__batch_builder is None:
            self.__reset_batch_builder()
            assert self.__batch_builder is not None

        try:
            self.__batch_builder.append(message)
        except ValueTooLarge as e:
            logger.debug("Caught %r, closing batch and retrying...", e)
            self.__submit_batch()

            # This may raise ``MessageRejected`` (if all of the shared memory
            # is in use) and create backpressure.
            self.__reset_batch_builder()
            assert self.__batch_builder is not None

            # If this raises ``ValueTooLarge``, that means that the input block
            # size is too small (smaller than the Kafka payload limit without
            # compression.)
            self.__batch_builder.append(message)

    def close(self) -> None:
        self.__closed = True

        if self.__batch_builder is not None and len(self.__batch_builder) > 0:
            self.__submit_batch()

    def terminate(self) -> None:
        self.__closed = True

        logger.debug("Terminating %r...", self.__pool)
        self.__pool.terminate()

        logger.debug("Shutting down %r...", self.__shared_memory_manager)
        self.__shared_memory_manager.shutdown()

        logger.debug("Terminating %r...", self.__next_step)
        self.__next_step.terminate()

    def join(self, timeout: Optional[float] = None) -> None:
        deadline = time.time() + timeout if timeout is not None else None

        logger.debug("Waiting for %s batches...", len(self.__results))
        while self.__results:
            self.__check_for_results(
                timeout=max(deadline -
                            time.time(), 0) if deadline is not None else None)

        self.__pool.close()

        logger.debug("Waiting for %s...", self.__pool)
        # ``Pool.join`` doesn't accept a timeout (?!) but this really shouldn't
        # block for any significant amount of time unless something really went
        # wrong (i.e. we lost track of a task)
        self.__pool.join()

        self.__shared_memory_manager.shutdown()

        self.__next_step.close()
        self.__next_step.join(
            timeout=max(deadline -
                        time.time(), 0) if deadline is not None else None)
Example #9
    def _batched_train_test(self):

        #batch_size = len(self.GPU_LIST)*self.model_per_gpu

        smm = SharedMemoryManager()
        smm.start()

        X_shared = _to_shared_mem(smm, self.X)
        w_shared = _to_shared_mem(smm, self.w)
        delta_shared = _to_shared_mem(smm, self.delta)

        with Manager() as manager:
            param_dict_mngd = manager.dict({
                'param_dicts_train': self.param_dicts_train,
                'param_dicts_test': self.param_dicts_test,
                'data_idx': self.data_idx,
                'X_shared_name': X_shared.name,
                'X_shape': self.X.shape,
                'X_dtype': self.X.dtype,
                'w_shared_name': w_shared.name,
                'w_shape': self.w.shape,
                'w_dtype': self.w.dtype,
                'delta_shared_name': delta_shared.name,
                'delta_shape': self.delta.shape,
                'delta_dtype': self.delta.dtype,
                'batch_size': self.batch_size,
                'batch_idx': None,
                'test_block_size': self.test_block_size,
            })

            rslt_mngd = manager.list([0] * len(self.param_dicts_test))

            for batch_idx in tqdm(range(
                    int(len(self.param_dicts_train) / self.batch_size) + 1),
                                  desc="batched cross validation"):

                param_dict_mngd['batch_idx'] = batch_idx
                # Debug path (run the batch in-process instead of in a child):
                # _run_batch_process(param_dict_mngd, rslt_mngd)
                # print(rslt_mngd)
                # raise
                p = mp.Process(target=_run_batch_process,
                               args=(param_dict_mngd, rslt_mngd))

                p.start()
                p.join()
                p.terminate()

            smm.shutdown()

            self.rslts = list(rslt_mngd)
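The `_to_shared_mem` helper is not shown in this excerpt; a plausible sketch, given that its return value's .name is stored in the managed dict alongside each array's shape and dtype so that the batch process can re-attach:

# Plausible sketch of the _to_shared_mem helper (not shown in this excerpt).
import numpy as np

def _to_shared_mem(smm, arr):
    shm = smm.SharedMemory(size=arr.nbytes)
    shm_arr = np.ndarray(arr.shape, dtype=arr.dtype, buffer=shm.buf)
    shm_arr[:] = arr[:]          # copy the data into the shared block
    return shm

# Inside _run_batch_process the arrays would typically be re-attached like this:
#   from multiprocessing.shared_memory import SharedMemory
#   shm = SharedMemory(name=params['X_shared_name'])
#   X = np.ndarray(params['X_shape'], dtype=params['X_dtype'], buffer=shm.buf)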
Example #10
class DataManager(Singleton):
    """DataManager loads data from sources and manages the shared memory"""
    def __init__(self, obs=-1, config=None):
        self._datasets = dict()
        self.smm = SharedMemoryManager()
        self.smm.start()
        self.conns = dict()
        self.obs = obs
        self.config = config

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    def shutdown(self):
        """Shutdown the shared memory manager and all data connections"""
        self.smm.shutdown()
        for conn in self.conns.values():
            conn.close()

    def _add_to_shared_memory(self, nparray: np.recarray) -> SharedMemory:
        """Internal function to copy an array into shared memory.

        Parameters
        ----------
        nparray : np.recarray
            The array to be copied into shared memory.

        Returns
        -------
        SharedMemory
            The shared memory object.
        """
        shm = self.smm.SharedMemory(nparray.nbytes)
        array = np.recarray(nparray.shape, dtype=nparray.dtype, buf=shm.buf)
        np.copyto(array, nparray)
        return shm

    def _download_dataset(self, dataset: Dataset) -> pd.DataFrame:
        """Internal function to download the dataset.

        Parameters
        ----------
        dataset : Dataset
            Dataset information including source, library, table, vars, etc.

        Returns
        -------
        pd.DataFrame
            DataFrame of the downloaded dataset.
        """
        # TODO: generic login data for different data sources
        if dataset.source == "wrds":
            usr = self.config.get("wrds_username")
            pwd = self.config.get("wrds_password")
        # If there exists a connection for the data source, use it!
        if (conn := self.conns.get(dataset.source, None)) is None:
            module = import_module(f"frds.data.{dataset.source}")
            conn = module.Connection(usr=usr, pwd=pwd)
            self.conns.update({dataset.source: conn})
        return conn.get_table(
            library=dataset.library,
            table=dataset.table,
            columns=dataset.vars,
            date_cols=dataset.date_vars,
            obs=self.obs,
        )
Example #11
    a[1] = "Proceso 1"
    print('\n hola soy el proceso 1 \n' +
          'Imprimiendo la lista compartida \n' + str(a))


def workerProceso2():
    # Get the list by its shared memory block name
    b = ShareableList(name=lc.shm.name)
    # Put a string into the list
    b[2] = "Proceso 2"
    print('\n hola soy el proceso 2 \n' +
          'Imprimiendo la lista compartida: \n' + str(b))


# Define the processes
p1 = Process(target=workerProceso1)
p2 = Process(target=workerProceso2)

p1.start()
p2.start()
p1.join()
p2.join()

# Print the list to verify that the changes were actually applied.

print('\n Imprimiendo la lista para ver si se efectuaron los cambios: \n' +
      str(lc))

# Shut down the shared memory block manager
smm.shutdown()
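This excerpt is cut off at the top: the creation of the manager `smm`, the shared list `lc`, and the first worker are missing. A hypothetical reconstruction of that setup, consistent with how the rest of the fragment uses those names:

# Hypothetical reconstruction of the truncated setup (not part of the excerpt).
from multiprocessing import Process
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import ShareableList

smm = SharedMemoryManager()
smm.start()

# The placeholder strings must be at least as long as the strings the workers
# assign, because each str slot's storage is fixed when the list is created.
lc = smm.ShareableList(['Proceso principal', 'placeholder 1', 'placeholder 2'])

def workerProceso1():
    # Get the list by its shared memory block name.
    a = ShareableList(name=lc.shm.name)
    # ...the excerpt above continues from here (a[1] = "Proceso 1", etc.)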
Example #12
class MonitorClient(Thread):
    """Client for napari shared memory monitor.

    Napari launches us. We get config information from our NAPARI_MON_CLIENT
    environment variable. That contains a port number to connect to.
    We connect our SharedMemoryManager to that port.

    We get these resources from the manager:

    1) shutdown_event()

    If this is set, napari is exiting. Usually it exits so fast that we get
    a ConnectionResetError exception instead of seeing this was set. We have
    no clean way to exit the SocketIO server yet.

    2) command_queue()

    We put commands onto this queue for napari to execute.

    3) data()

    Data from napari's monitor.add() command.
    """
    def __init__(self, config: dict, client_name="?"):
        super().__init__()
        assert config
        self.config = config
        self.client_name = client_name

        self.running = True
        self.napari_data = None

        LOGGER.info("Starting MonitorClient process %s", os.getpid())
        _log_env()

        server_port = config['server_port']
        LOGGER.info("Connecting to port %d...", server_port)

        # Right now we just need to magically know these callback names,
        # maybe we can come up with a better way.
        napari_api = ['shutdown_event', 'command_queue', 'data']
        for name in napari_api:
            SharedMemoryManager.register(name)

        # Connect to napari's shared memory.
        self._manager = SharedMemoryManager(
            address=('localhost', config['server_port']),
            authkey=str.encode('napari'),
        )
        self._manager.connect()

        # Get the shared resources.
        self._shared = SharedResources(
            self._manager.shutdown_event(),
            self._manager.command_queue(),
            self._manager.data(),
        )

        # Start our thread so we can poll napari.
        self.start()

    def run(self) -> None:
        """Check shared memory for new data."""

        LOGGER.info("MonitorClient thread is running...")

        while True:
            if not self._poll():
                LOGGER.info("Exiting...")
                break

            time.sleep(POLL_INTERVAL_MS / 1000)

        # webmon checks this and stops/exits.
        self.running = False

    def _poll(self) -> bool:
        """See if there is now information in shared mem."""

        # LOGGER.info("Poll...")
        try:
            if self._shared.shutdown.is_set():
                # We sometimes do see the shutdown event was set. But usually
                # we just get ConnectionResetError, because napari is exiting.
                LOGGER.info("Shutdown event was set.")
                return False  # Stop polling
        except ConnectionResetError:
            LOGGER.info("ConnectionResetError.")
            return False  # Stop polling

        # Do we need to copy here?
        self.napari_data = {
            "tile_config": self._shared.data.get('tile_config'),
            "tile_state": self._shared.data.get('tile_state'),
        }

        if DUMP_DATA_FROM_NAPARI:
            pretty_str = json.dumps(self.napari_data, indent=4)
            LOGGER.info("New data from napari: %s", pretty_str)

        return True  # Keep polling

    def post_command(self, command) -> None:
        """Send new command to napari.
        """
        LOGGER.info(f"Posting command {command}")

        try:
            self._shared.commands.put(command)
        except ConnectionRefusedError:
            self._log("ConnectionRefusedError")

    def stop(self) -> None:
        """Call on shutdown. TODO_MON: no one calls this yet?"""
        self._manager.shutdown()
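The napari side of this protocol is not shown here. In general, the serving process registers the same names with callables that return the real objects before starting its manager, and both sides then talk to those objects through proxies served by the manager process. A hypothetical sketch of that pattern:

# Hypothetical server-side counterpart to the client registration above; the
# registered names mirror the napari_api list, everything else is illustrative.
import multiprocessing
from queue import Queue
from multiprocessing.managers import SharedMemoryManager

_shutdown_event = multiprocessing.Event()
_command_queue = Queue()
_data = {}

SharedMemoryManager.register('shutdown_event', callable=lambda: _shutdown_event)
SharedMemoryManager.register('command_queue', callable=lambda: _command_queue)
SharedMemoryManager.register('data', callable=lambda: _data)

manager = SharedMemoryManager(address=('localhost', 0), authkey=b'napari')
manager.start()           # serves the registered objects from a child process
print(manager.address)    # the (host, port) a client would be told to connect to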
Example #13
def parallel_eval_expr(n, ops, goal, timeout=1, proc_cnt=16):
    q, q_type = make_tasks(n, ops)
    task_cnt = len(q)
    task_ranges = calc_task_ranges(proc_cnt, q)
    smm = SharedMemoryManager()
    smm.start()
    task_mem, tq = make_shared_mem(smm, task_cnt, q_type)
    tq[:] = q[:]
    del q
    res_mem, res = make_shared_mem(smm, task_cnt, np.int8)
    res[:] = -2  # initial value indicating that result is not being calculated
    start_time_mem, start_time = make_shared_mem(smm, proc_cnt, np.float64)
    status_mem, status = make_shared_mem(smm, proc_cnt, np.int64)
    status[:] = ProcStatus.IDLE
    cur_idx_mem, cur_idx = make_shared_mem(smm, proc_cnt, np.int64)
    cur_idx[:] = -1
    processes = [None] * proc_cnt
    restarts = [0] * proc_cnt
    cur_t = time.time()
    while not all(status[i] == ProcStatus.DONE for i in range(proc_cnt)):
        print_status(res, status, cur_idx, restarts, proc_cnt)
        try:
            for i, p in enumerate(processes):
                if p is None:
                    continue
                if status[i] == ProcStatus.DONE:
                    continue
                if (cur_t - start_time[i]) > timeout:
                    p.kill()
                    if cur_idx[i] < 0:
                        continue
                    res[cur_idx[i]] = -1

            for i, p in enumerate(processes):
                if p is not None and p.is_alive():
                    continue
                if status[i] == ProcStatus.DONE:
                    try:
                        p.close()
                    except Exception:
                        pass
                    processes[i] = None
                    continue
                try:
                    p.close()
                except Exception:
                    pass
                is_new_proc = cur_idx[i] == -1
                idx = task_ranges[i][0] if is_new_proc else cur_idx[i]
                end_idx = task_ranges[i][1]
                restarts[i] += 1
                processes[i] = Process(
                    target=eval_f,
                    args=(
                        task_mem.name,
                        res_mem.name,
                        status_mem.name,
                        start_time_mem.name,
                        cur_idx_mem.name,
                        i,
                        # skipping the one that takes too long
                        idx + (0 if is_new_proc else 1),
                        end_idx,
                        goal,
                        task_cnt,
                        q_type))
                processes[i].start()
        except Exception:
            import traceback
            traceback.print_exc()

        cur_t = time.time()
        time.sleep(timeout)

    print_status(res, status, cur_idx, restarts, proc_cnt)

    for p in processes:
        if p is None:
            continue
        p.join()
        p.close()

    for pf in tq[np.argwhere(res == 1)].flatten():
        pf = pf.decode('ascii')
        print(eval_psfx(pf), '==', postfix_to_infix(pf), pf)

    smm.shutdown()
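`make_tasks`, `eval_f`, `print_status` and the other helpers are defined elsewhere in this file. A plausible sketch of `make_shared_mem`, given that each call returns a SharedMemory block together with a 1-D NumPy view of the requested length and dtype:

# Plausible sketch of make_shared_mem (not shown in this excerpt): allocate a
# manager-owned block and wrap it in a NumPy array view.
import numpy as np

def make_shared_mem(smm, length, dtype):
    dtype = np.dtype(dtype)
    mem = smm.SharedMemory(size=length * dtype.itemsize)
    arr = np.ndarray((length,), dtype=dtype, buffer=mem.buf)
    return mem, arr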