Example #1
    def __init__(self):
        # We expose the run_command event so RemoteCommands can hook into it
        # and execute commands we receive from clients.
        self.events = EmitterGroup(source=self,
                                   auto_connect=True,
                                   run_command=None)

        # Must register all callbacks before we create our instance of
        # SharedMemoryManager.
        SharedMemoryManager.register('napari_shutting_down',
                                     callable=self._napari_shutting_down)
        SharedMemoryManager.register('commands', callable=self._commands)
        SharedMemoryManager.register('client_messages',
                                     callable=self._client_messages)
        SharedMemoryManager.register('data', callable=self._data)

        # We ask for port 0 which means let the OS choose a port. We send
        # the chosen port to the client in its NAPARI_MON_CLIENT variable.
        self._manager = SharedMemoryManager(address=('127.0.0.1', 0),
                                            authkey=str.encode('napari'))
        self._manager.start()

        # Get the shared resources.
        self._remote = NapariRemoteAPI(
            self._manager.napari_shutting_down(),
            self._manager.commands(),
            self._manager.client_messages(),
            self._manager.data(),
        )
Example #2
    def __init__(self, obs=-1, config=None):
        self._datasets = dict()
        self.smm = SharedMemoryManager()
        self.smm.start()
        self.conns = dict()
        self.obs = obs
        self.config = config
Example #3
def generate_sinusoids():
    smm = SharedMemoryManager()
    smm.start()

    W = []
    increment = 1.5 / (BITSTRING_SIZE + 1)

    t = np.linspace(0, (4 * np.pi) / TARGET_W, N)

    for i in range(BITSTRING_SIZE + 1):
        if (0.5 + i * increment) == 1:
            continue

        w = TARGET_W / (0.5 + i * increment)
        W.append(w)
        SINUSOIDS.append(0.25 * np.cos(w * t))

    TARGET = np.cos(TARGET_W * t)

    w_list = ""
    freqs = open("frequencies.ini", "w")
    for i in range(BITSTRING_SIZE):
        w_list += str(W[i]) + ","

    w_list = w_list[0:len(w_list) - 2]

    records = {"DEFAULT": {"frequencies": str(w_list), "target": TARGET_W}}

    record = configparser.ConfigParser()
    record.read_dict(records)
    record.write(freqs)

    freqs.close()
Example #4
    def __init__(self, config: dict, on_shutdown: Callable[[], None]):
        super().__init__()
        self.config = config
        self._on_shutdown = on_shutdown
        self._running = False

        LOGGER.info("Starting process %s", os.getpid())
        _log_env()  # Log our startup environment.

        server_port = config['server_port']
        LOGGER.info("connecting to napari on port %d.", server_port)

        # We have to register these before creating the SharedMemoryManager.
        # Note that we don't have to give the types, just the names.
        # Although to use them we probably want to know the types!
        for name in NapariRemoteAPI.RESOURCES:
            SharedMemoryManager.register(name)

        # Connect to napari's shared memory on the server_port that napari
        # passed us in our NAPARI_MON_CLIENT configuration.
        self._manager = SharedMemoryManager(
            address=('localhost', config['server_port']),
            authkey=str.encode('napari'),
        )
        self._manager.connect()

        # Get the shared resources as a convenient named tuple.
        self._remote = NapariRemoteAPI.from_manager(self._manager)

        # Start our thread which will poll napari.
        self.start()
Example #5
    def __init__(self, addr=None, manager=None, mutex=None, format_list=None,
                 size=0, ratio=2):
        if mutex is None:
            self.mutex = RLock()
        else:
            self.mutex = mutex
        if manager is None:
            self._manager = DummyManager()
        elif isinstance(manager, (SharedMemoryManager, DummyManager)):
            self._manager = manager
        else:
            self._manager = SharedMemoryManager(manager)
        capacity = int(size * ratio)
        if capacity == 0:
            capacity = ratio
        with self.mutex:
            if addr is None:
                if format_list is None:
                    raise ValueError("Either addr or format_list must be provided")
                self._shl = self._manager.ShareableList(format_list)
                self._shl_addr = self._shl.shm.name
                self._shm = self._manager.SharedMemory(capacity)
                self._shm_addr = self._shm.name
                self._shl[0] = self._shm_addr
                self._shl[1] = int(size)
                self._shl[2] = int(capacity)
            else:
                self._shl_addr = addr
                self._shl = shared_memory.ShareableList(name=addr)
                self._shm_addr = self._shl[0]
                self._shm = shared_memory.SharedMemory(name=self._shm_addr)
Example #6
    def __init__(self, obs=1000, config=None):
        self._datasets = dict()
        self.smm = SharedMemoryManager()
        self.smm.start()
        self.result = Manager().list()
        self.conns = dict()
        self.obs = obs
        self.config = config
Example #7
def start_shared_memory_manager():
    # type: () -> SharedMemoryManager
    """ Starts the shared memory manager.

    :return: Shared memory manager instance.
    """
    smm = SharedMemoryManager(address=('', PORT), authkey=AUTH_KEY)
    smm.start()
    return smm
Example #8
    def __init__(
        self,
        function: Callable[[Message[TPayload]], TTransformed],
        next_step: ProcessingStep[TTransformed],
        processes: int,
        max_batch_size: int,
        max_batch_time: float,
        input_block_size: int,
        output_block_size: int,
        metrics: MetricsBackend,
    ) -> None:
        self.__transform_function = function
        self.__next_step = next_step
        self.__max_batch_size = max_batch_size
        self.__max_batch_time = max_batch_time

        self.__shared_memory_manager = SharedMemoryManager()
        self.__shared_memory_manager.start()

        self.__pool = Pool(
            processes,
            initializer=parallel_transform_worker_initializer,
            context=multiprocessing.get_context("spawn"),
        )

        self.__input_blocks = [
            self.__shared_memory_manager.SharedMemory(input_block_size)
            for _ in range(processes)
        ]

        self.__output_blocks = [
            self.__shared_memory_manager.SharedMemory(output_block_size)
            for _ in range(processes)
        ]

        self.__batch_builder: Optional[BatchBuilder[TPayload]] = None

        self.__results: Deque[
            Tuple[
                MessageBatch[TPayload],
                AsyncResult[Tuple[int, MessageBatch[TTransformed]]],
            ]
        ] = deque()

        self.__metrics = metrics
        self.__batches_in_progress = Gauge(metrics, "batches_in_progress")
        self.__pool_waiting_time: Optional[float] = None

        self.__closed = False

        def handle_sigchld(signum: int, frame: Any) -> None:
            # Terminates the consumer if any child process of the
            # consumer is terminated.
            # This is meant to detect the unexpected termination of
            # multiprocessor pool workers.
            if not self.__closed:
                self.__metrics.increment("sigchld.detected")
                raise ChildProcessTerminated()

        signal.signal(signal.SIGCHLD, handle_sigchld)
Example #9
def load_shared_memory_manager():
    # type: () -> None
    """ Connects to the main shared memory manager initiated in piper_worker.py.

    :return: None
    """
    global SHARED_MEMORY_MANAGER
    SHARED_MEMORY_MANAGER = SharedMemoryManager(address=(IP, PORT),
                                                authkey=AUTH_KEY)
    SHARED_MEMORY_MANAGER.connect()
Example #10
    def __init__(self, maze: Maze = None, memory_manager: SharedMemoryManager = None):
        self._maze = maze
        self.display = None
        self.memory_manager = memory_manager
        self.pos = [0, 0]
        self._data = {}

        if self.memory_manager is None:
            self.memory_manager = SharedMemoryManager()
            self.memory_manager.start()
Example #11
    def __init__(self,
                 cfgs: List[VideoConfig],
                 scfg: ServerConfig,
                 stream_path: Path,
                 sample_path: Path,
                 frame_path: Path,
                 region_path: Path,
                 offline_path: Path = None,
                 build_pool=True) -> None:
        super().__init__()
        # self.cfgs = I.load_video_config(cfgs)[-1:]
        # self.cfgs = I.load_video_config(cfgs)
        # self.cfgs = [c for c in self.cfgs if c.enable]
        # self.cfgs = [c for c in cfgs if enable_options[c.index]]
        self.scfg = scfg
        self.cfgs = cfgs
        self.quit = False
        # Communication Pipe between detector and stream receiver
        self.pipes = [Manager().Queue(c.max_streams_cache) for c in self.cfgs]
        self.time_stamp = generate_time_stamp('%m%d')
        self.stream_path = stream_path / self.time_stamp
        self.sample_path = sample_path / self.time_stamp
        self.frame_path = frame_path / self.time_stamp
        self.region_path = region_path / self.time_stamp
        self.offline_path = offline_path
        self.process_pool = None
        self.thread_pool = None
        self.shut_down_event = Manager().Event()
        self.shut_down_event.clear()
        self.scheduler = BackgroundScheduler()
        if build_pool:
            # build service
            # pool_size = min(len(cfgs) * 2, cpu_count() - 1)
            # self.process_pool = Pool(processes=pool_size)
            self.process_pool = Pool(processes=len(self.cfgs) * 5)
            self.thread_pool = ThreadPoolExecutor()
        # self.clean()
        self.stream_receivers = [
            stream.StreamReceiver(self.stream_path / str(c.index),
                                  offline_path, c, self.pipes[idx])
            for idx, c in enumerate(self.cfgs)
        ]

        self.scheduler.add_job(self.notify_shut_down,
                               'cron',
                               month=self.scfg.cron['end']['month'],
                               day=self.scfg.cron['end']['day'],
                               hour=self.scfg.cron['end']['hour'],
                               minute=self.scfg.cron['end']['minute'])
        self.scheduler.start()
        self.frame_cache_manager = SharedMemoryManager()
        self.frame_cache_manager.start()
Example #12
    def _manager_test(self, dtype):
        with SharedMemoryManager() as smm:
            address = smm.address
            vector = SMVector(manager=address, dtype=dtype)
            for i in range(10):
                vector.push_back(i)
            vector.__del__()
Example #13
def compute_julia(nr_points=100,
                  pool_size=2,
                  work_size=15,
                  verbose=False,
                  max_iters=255,
                  max_norm=2.0):
    size = nr_points**2
    z_size = np.dtype(complex).itemsize
    n_size = np.dtype(np.int32).itemsize
    with SharedMemoryManager() as shmem_mgr:
        with mp.Pool(pool_size) as pool:
            z_shmem = shmem_mgr.SharedMemory(size=z_size * size)
            z_buf = np.ndarray((size, ), dtype=complex, buffer=z_shmem.buf)
            z_buf[:] = init_z(nr_points)
            n_shmem = shmem_mgr.SharedMemory(size=n_size * size)
            n_buf = np.ndarray((size, ), dtype=np.int32, buffer=n_shmem.buf)
            n_buf[:] = np.zeros((size, ), dtype=np.int32)
            args = [(z_shmem, n_shmem, i * work_size,
                     min(z_buf.size, (i + 1) * work_size), max_iters, max_norm)
                    for i in range(int(np.ceil(z_buf.size / work_size)))]
            if verbose:
                print(args, file=sys.stderr)
            pid_counter = Counter()
            for pid in pool.imap_unordered(compute_partial_julia, args):
                pid_counter[pid] += 1
            if verbose:
                print(pid_counter, file=sys.stderr)
            return n_buf.copy().reshape(nr_points, nr_points)
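The worker `compute_partial_julia` is referenced above but not shown. Because SharedMemory objects pickle by name, each pool worker can attach NumPy views onto the shared blocks it receives and fill only its [start, end) slice. A minimal sketch under those assumptions (the Julia constant and exact update rule are placeholders, not the original code):

import os

import numpy as np


def compute_partial_julia(args):
    # Hypothetical worker matching the argument tuples built in compute_julia:
    # attach views onto the shared blocks and run the escape-time iteration.
    z_shmem, n_shmem, start, end, max_iters, max_norm = args
    z = np.frombuffer(z_shmem.buf, dtype=complex)
    n = np.frombuffer(n_shmem.buf, dtype=np.int32)
    c = -0.4 + 0.6j  # placeholder Julia constant
    for i in range(start, end):
        while n[i] < max_iters and abs(z[i]) <= max_norm:
            z[i] = z[i] * z[i] + c
            n[i] += 1
    return os.getpid()  # the parent tallies worker PIDs in a Counter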
Example #14
    def __init__(
        self,
        function: Callable[[Message[TPayload]], TTransformed],
        next_step: ProcessingStep[TTransformed],
        processes: int,
        max_batch_size: int,
        max_batch_time: float,
        input_block_size: int,
        output_block_size: int,
        metrics: MetricsBackend,
    ) -> None:
        self.__transform_function = function
        self.__next_step = next_step
        self.__max_batch_size = max_batch_size
        self.__max_batch_time = max_batch_time

        self.__shared_memory_manager = SharedMemoryManager()
        self.__shared_memory_manager.start()

        self.__pool = Pool(
            processes,
            initializer=parallel_transform_worker_initializer,
            context=multiprocessing.get_context("spawn"),
        )

        self.__input_blocks = [
            self.__shared_memory_manager.SharedMemory(input_block_size)
            for _ in range(processes)
        ]

        self.__output_blocks = [
            self.__shared_memory_manager.SharedMemory(output_block_size)
            for _ in range(processes)
        ]

        self.__batch_builder: Optional[BatchBuilder[TPayload]] = None

        self.__results: Deque[
            Tuple[
                MessageBatch[TPayload],
                AsyncResult[Tuple[int, MessageBatch[TTransformed]]],
            ]
        ] = deque()

        self.__batches_in_progress = Gauge(metrics, "batches_in_progress")

        self.__closed = False
Example #15
    def _init_memory(self, positions: np.ndarray, velocities: np.ndarray, masses: np.ndarray):
        """ Prepares shared memory arrays. """
        # setup process that sets up shared memory
        self._memory_manager = SharedMemoryManager()
        self._memory_manager.start()

        max_nodes = self.bodies + 64

        # create shared memory buffers
        self._positions_shm = self._memory_manager.SharedMemory(positions.nbytes)
        self._velocities_shm = self._memory_manager.SharedMemory(velocities.nbytes)
        self._accelerations_shm = self._memory_manager.SharedMemory(velocities.nbytes)
        self._masses_shm = self._memory_manager.SharedMemory(masses.nbytes)

        self._nodes_positions_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 3), np.float64).nbytes)
        self._nodes_masses_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, ), np.float64).nbytes)
        self._nodes_sizes_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, ), np.float64).nbytes)
        self._nodes_children_types_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 8), np.int64).nbytes)
        self._nodes_children_ids_shm = self._memory_manager.SharedMemory(np.empty((max_nodes, 8), np.int64).nbytes)

        # setup NumPy arrays
        self._data = SharedData(
            time_step=self.time_step,
            theta=self._theta,
            gravitational_constant=self.gravitational_constant,
            softening=self.softening,

            nodes_count=Value('i', 0),

            positions=np.ndarray((self.bodies, 3), dtype=np.float64, buffer=self._positions_shm.buf),
            velocities=np.ndarray((self.bodies, 3), dtype=np.float64, buffer=self._velocities_shm.buf),
            accelerations=np.ndarray((self.bodies, 3), dtype=np.float64, buffer=self._accelerations_shm.buf),
            masses=np.ndarray((self.bodies, ), dtype=np.float64, buffer=self._masses_shm.buf),

            nodes_positions=np.ndarray((max_nodes, 3), dtype=np.float64, buffer=self._nodes_positions_shm.buf),
            nodes_masses=np.ndarray((max_nodes, ), dtype=np.float64, buffer=self._nodes_masses_shm.buf),
            nodes_sizes=np.ndarray((max_nodes, ), dtype=np.float64, buffer=self._nodes_sizes_shm.buf),
            nodes_children_types=np.ndarray((max_nodes, 8), dtype=np.int64, buffer=self._nodes_children_types_shm.buf),
            nodes_children_ids=np.ndarray((max_nodes, 8), dtype=np.int64, buffer=self._nodes_children_ids_shm.buf)
        )

        # copy data into shared arrays
        self._data.positions[:] = positions[:]
        self._data.velocities[:] = velocities[:]
        self._data.masses[:] = masses[:]
Example #16
    def __init__(self, manager: SharedMemoryManager, cache_size, unit,
                 shape) -> None:
        self.unit = unit
        self.shape = shape
        self.cache_size = cache_size
        self.total_bytes = self.unit * self.cache_size
        self.cache_block = manager.SharedMemory(size=self.total_bytes)
        self.lock = Manager().Lock()
        self.st_id = Manager().Value('i', cache_size)
        self.et_id = Manager().Value('i', -1)
Example #17
def main():
    smm = SharedMemoryManager()
    smm.start()
    ls = smm.ShareableList(range(2000))
    with Pool(4) as p:
        print(*list(p.imap_unordered(f, ls)), sep='\n')
        # print(p.map(f, [2, 3, 4, 5, 6]))  # lock
        # print(p.map(f, [2, 3, 4, 5, 6]))
    smm.shutdown()
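The mapped function `f` is not defined in this snippet. `imap_unordered` iterates the ShareableList in the parent and hands each element to the workers as a plain int, so any unary function will do; a trivial placeholder, purely for illustration:

def f(x):
    # Hypothetical worker: each ShareableList element arrives as an ordinary int.
    return x * x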
Example #18
    def multiprocess(self, n, nthread=8):
        """

        多进程 并发

        :return:
        """

        print('Parent process %s.' % os.getpid())

        p = Pool(nthread)  # Process pool: ask the system for nthread worker processes

        smm = SharedMemoryManager()  # TODO: requires Python 3.8+
        smm.start()  # Start the process that manages the shared memory blocks

        cache_list = smm.ShareableList([0] * n)
        # Values stored in a ShareableList are limited to the built-in types
        # int, float, bool, str (< 10 MB each), bytes (< 10 MB each) and None.
        # Unlike the built-in list, its length is fixed (no append, insert, etc.)
        # and slicing does not dynamically create new ShareableList instances.

        shm_a = smm.SharedMemory(size=n)
        # shm_a.buf[:] = bytearray([0]*n)
        # shm_a.buf[:] = [0] * n

        print('shm_a id in main process: {} '.format(id(shm_a)))

        # Compare the memory space of the parent process with that of the child processes.
        self.global_array = [0] * n
        print('array id in main process: {} '.format(id(self.global_array)))

        self.global_string = 'abc'
        print('string id in main process: {} '.format(id(self.global_string)))

        self.global_int = 10
        print('int id in main process: {} '.format(id(self.global_int)))

        for i in range(n):

            # p.apply_async(task, args=(cache_name, i))  # apply_async fetches results asynchronously
            p.apply_async(self.task, args=(cache_list, shm_a, i))

        print('Waiting for all subprocesses done...')
        p.close()

        p.join()

        print('All subprocesses done.')

        smm.shutdown()

        return cache_list, shm_a
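The `self.task` method dispatched via `apply_async` is not shown. ShareableList and SharedMemory objects pickle by name, so a worker can receive them directly and write into slot `i`; a hypothetical sketch (the write pattern and the print are assumptions):

    def task(self, cache_list, shm_a, i):
        # Hypothetical worker: record this task's index in the shared list
        # and flip one byte of the shared memory block.
        cache_list[i] = i
        shm_a.buf[i] = 1
        print('shm_a id in subprocess {}: {}'.format(os.getpid(), id(shm_a)))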
Example #19
  def train(self):
    print("----- Train Start -----")

    with SharedMemoryManager() as smm:
      res_queue = Queue()

      model_weights = self.global_model.get_weights()
      weight_memory = [smm.SharedMemory(size=x.nbytes) for x in model_weights]
      weight_names, weight_shapes, weight_dtype = [], [], None
      shared_weights = []
      for memory, weight in zip(weight_memory, model_weights):
        print(memory.name)
        new_weight = np.ndarray(weight.shape, dtype=weight.dtype, buffer=memory.buf)
        new_weight[:] = weight[:]
        weight_names.append(memory.name)
        weight_shapes.append(weight.shape)
        weight_dtype = weight.dtype
        shared_weights.append(new_weight)
      print("----- Worker Start -----")



      workers = [Worker(
        self.state_size,
        self.action_size,
        weight_names,
        weight_shapes, 
        weight_dtype,
        self.opt, 
        res_queue,
        i, 
        write_fp=self.save_dir
      ) for i in range(1)]  # use range(mp.cpu_count()) for one worker per core

      print("----- Worker Start -----")
      for worker in workers:
        worker.start()
      for worker in workers:
        worker.join()

    # Drain the results queue without blocking now that the workers have joined.
    res_queue_items = []
    while not res_queue.empty():
      res_queue_items.append(res_queue.get())

    print(res_queue_items)
    plt.plot(res_queue_items)
    plt.ylabel('Moving average ep reward')
    plt.xlabel('Step')
    plt.savefig(self.save_dir + 'Moving Average.png')
    plt.show()
Example #20
def create_shared_memory_manager(address, authkey):
    # type: (typing.Tuple[str, int], typing.Optional[bytes]) -> SharedMemoryManager
    """ Create a new shared memory manager process at the given address with
    the provided authkey.

    :param address: Shared memory manager address (IP, PORT)
    :param authkey: Shared memory manager authentication key
    :return: New process
    """
    smm = SharedMemoryManager(address=address,
                              authkey=authkey)
    return smm
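Unlike most snippets above, this helper only constructs the manager; the caller decides whether to start() it as a new server process or connect() it to an existing one, and when to shut it down. A minimal usage sketch with placeholder address and authkey values:

smm = create_shared_memory_manager(('127.0.0.1', 50000), b'secret')  # placeholder values
smm.start()                      # spawn the manager process
block = smm.SharedMemory(1024)   # allocate a block through the manager
# ... hand block.name to other processes ...
smm.shutdown()                   # releases every block the manager created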
Example #21
def test_parallel_transform_worker_apply() -> None:
    messages = [
        Message(
            Partition(Topic("test"), 0),
            i,
            KafkaPayload(None, b"\x00" * size, None),
            datetime.now(),
        ) for i, size in enumerate([1000, 1000, 2000, 4000])
    ]

    with SharedMemoryManager() as smm:
        input_block = smm.SharedMemory(8192)
        assert input_block.size == 8192

        input_batch = MessageBatch(input_block)
        for message in messages:
            input_batch.append(message)

        assert len(input_batch) == 4

        output_block = smm.SharedMemory(4096)
        assert output_block.size == 4096

        index, output_batch = parallel_transform_worker_apply(
            transform_payload_expand,
            input_batch,
            output_block,
        )

        # The first batch should be able to fit 2 messages.
        assert index == 2
        assert len(output_batch) == 2

        index, output_batch = parallel_transform_worker_apply(
            transform_payload_expand,
            input_batch,
            output_block,
            index,
        )

        # The second batch should be able to fit one message.
        assert index == 3
        assert len(output_batch) == 1

        # The last message is too large to fit in the batch.
        with pytest.raises(ValueTooLarge):
            parallel_transform_worker_apply(
                transform_payload_expand,
                input_batch,
                output_block,
                index,
            )
Example #22
def test_share_memory_rw():
    cache_size = 10
    init_frame = np.zeros((2160, 3840, 3), dtype=np.uint8)
    global_index = Manager().Value('i', 1)
    smm = SharedMemoryManager()
    smm.start()
    s1 = SharedMemoryFrameCache(smm, cache_size, init_frame.nbytes,
                                init_frame.shape)
    with Pool(2) as pool:
        r1 = pool.apply_async(blur, args=(
            s1,
            global_index,
            'Shared Memory',
        ))
        r2 = pool.apply_async(receive,
                              args=(
                                  s1,
                                  global_index,
                                  'Shared Memory',
                              ))
        r1.get()
        r2.get()
Example #23
    def shared(self):
        args = np.reshape(range(len(self.arr)), (10, int(len(self.arr) / 10)))
        with SharedMemoryManager() as smm:
            shm = smm.SharedMemory(size=self.arr.nbytes)
            sh_arr = np.ndarray(self.arr.shape,
                                dtype=self.arr.dtype,
                                buffer=shm.buf)
            # Copy the data into the shared block; plain assignment would only
            # rebind the local name and leave the shared memory empty.
            sh_arr[:] = self.arr
            with Pool(self.threads,
                      initializer=initProcess,
                      initargs=(sh_arr, )) as p:
                out = p.starmap(arr_, [(args[i], ) for i in range(10)])
        return out
Example #24
def lif_feed_forward_benchmark(parameters: BenchmarkParameters):
    shared = SharedMemoryManager()
    shared.start()
    params = list(parameters._asdict().values())
    shared_list = shared.ShareableList(params)

    run(["python3", __file__, shared_list.shm.name], stderr=STDOUT)
    duration = shared_list[0]
    shared_list.shm.close()
    shared.shutdown()
    return duration
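The benchmark re-runs this same file as a subprocess and passes it the ShareableList's shared-memory name on the command line; the child is expected to attach by that name and write the measured duration into slot 0. A sketch of what the child-side entry point could look like (the details are assumptions, only the name-passing protocol appears above):

if __name__ == "__main__":
    import sys
    from multiprocessing.shared_memory import ShareableList

    results = ShareableList(name=sys.argv[1])  # attach to the parent's list
    results[0] = 0.123                         # placeholder: measured duration in seconds
    results.shm.close()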
Example #25
    def __init__(self, config: dict, client_name="?"):
        super().__init__()
        assert config
        self.config = config
        self.client_name = client_name

        self.running = True
        self.napari_data = None

        LOGGER.info("Starting MonitorClient process %s", os.getpid())
        _log_env()

        server_port = config['server_port']
        LOGGER.info("Connecting to port %d...", server_port)

        # Right now we just need to magically know these callback names,
        # maybe we can come up with a better way.
        napari_api = ['shutdown_event', 'command_queue', 'data']
        for name in napari_api:
            SharedMemoryManager.register(name)

        # Connect to napari's shared memory.
        self._manager = SharedMemoryManager(
            address=('localhost', config['server_port']),
            authkey=str.encode('napari'),
        )
        self._manager.connect()

        # Get the shared resources.
        self._shared = SharedResources(
            self._manager.shutdown_event(),
            self._manager.command_queue(),
            self._manager.data(),
        )

        # Start our thread so we can poll napari.
        self.start()
Example #26
def parallel_run_sims(sim,
                      n_runs=1,
                      n_cores=1,
                      shrink=True,
                      disable_progress=False):
    ''' Run n_runs simulations in parallel with the indicated number of cores.
        Returns a list of completed simulations.
        Use this in place of the MultiSim.run() method.

        sim: a Covasim Sim object
        n_runs: Number of simulations to run. Default 1.
        n_cores: Number of cores to use. Default 1.
        shrink: Remove the People from completed simulations. Saves RAM. Default True.
                This is always False if n_runs=1 for debugging.
        disable_progress: Disable the progress bars. Default False.

        For n_runs=1 the run is not parallelized, to help with debugging.'''
    sims_complete = []
    # Generator for the parallel sims.
    try:
        if n_runs > 1:
            print('Running %s simulations with %s cores.' % (n_runs, n_cores))
            sys.stdout.flush()
            # The context managers for the SharedMemoryManager and Pool will
            # automatically clean up.
            with SharedMemoryManager() as smm:
                sim_str = pickle.dumps(sim)
                del sim  # save RAM
                sim_shm = smm.SharedMemory(size=len(sim_str))
                sim_shm.buf[:] = sim_str[:]
                del sim_str
                sims_data = gen_sims(sim_shm, n_runs, shrink)
                with mp.Pool(n_cores) as pool:  # maxtasksperchild=1)
                    for res in tqdm.tqdm(pool.imap_unordered(
                            unpickle_and_run_sim, sims_data),
                                         total=n_runs,
                                         disable=disable_progress):
                        sims_complete.append(res)
        else:
            # Don't run in parallel - this helps with debugging 1 process
            sims_complete.append(parfun(sim, shrink=False))
    except Exception as e:
        print('Failure running in parallel!')
        print(e)
        raise e

    print('Simulations complete.')
    return sims_complete
Example #27
def promedios():
    curso1 = [random.randint(1, 7) for _ in range(30)]
    curso2 = [random.randint(1, 7) for _ in range(30)]
    curso3 = [random.randint(1, 7) for _ in range(30)]
    curso4 = [random.randint(1, 7) for _ in range(30)]

    with SharedMemoryManager() as smm:
        notas = smm.ShareableList([0, 0, 0, 0])
        process = []
        process.append(Process(target=calc_promedio, args=(notas, 0, curso1)))
        process.append(Process(target=calc_promedio, args=(notas, 1, curso2)))
        process.append(Process(target=calc_promedio, args=(notas, 2, curso3)))
        process.append(Process(target=calc_promedio, args=(notas, 3, curso4)))
        for p in process:
            p.start()
        for p in process:
            p.join()
        for i, n in enumerate(notas):
            print(f"promedio curso {i+1} es {n:.2f}")
Example #28
def initialize_shared_ndarray_for_reading(shape):
    # make an array to store terrain
    init_grid = np.random.normal(0, 1, shape)
    #print(init_grid)

    # create a section of shared memory of the same size as the grid array
    with SharedMemoryManager() as smm:
        #shm = shared_memory.SharedMemory(create=True, size=init_grid.nbytes)
        shm = smm.SharedMemory(size=init_grid.nbytes)

        # create another ndarray of the same shape & type as grid, backed by shared memory
        prev_grid = np.ndarray(init_grid.shape,
                               dtype=init_grid.dtype,
                               buffer=shm.buf)

        np.copyto(prev_grid, init_grid)

    print("shared array", shm.name, "has been initialized")
    return shm.name, init_grid.shape, init_grid.dtype
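One caveat: blocks created through a SharedMemoryManager are released when the manager shuts down, and here that happens as soon as the `with` block exits, so a reader attaching later via the returned name would fail. A hypothetical variant that keeps the manager alive by accepting it from the caller:

def initialize_shared_ndarray_for_reading_v2(shape, smm):
    # smm is a SharedMemoryManager the caller has already start()ed and keeps
    # alive for as long as readers need the block.
    init_grid = np.random.normal(0, 1, shape)
    shm = smm.SharedMemory(size=init_grid.nbytes)
    prev_grid = np.ndarray(init_grid.shape, dtype=init_grid.dtype, buffer=shm.buf)
    np.copyto(prev_grid, init_grid)
    return shm.name, init_grid.shape, init_grid.dtype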
Example #29
def compute(array_size, pool_size, chunk_size, verbose=False):
    with SharedMemoryManager() as shmem_manager:
        with Pool(pool_size) as pool:
            dtype = np.int32
            t_size = np.dtype(dtype).itemsize
            shmem_data = shmem_manager.SharedMemory(size=t_size *
                                                    array_size**2)
            data = np.ndarray((array_size, array_size),
                              dtype=dtype,
                              buffer=shmem_data.buf)
            for i in range(data.shape[0]):
                for j in range(data.shape[1]):
                    data[i, j] = i * array_size + j
            args = [(shmem_data, np.int32, i + 1, i * chunk_size,
                     min((i + 1) * chunk_size, data.size))
                    for i in range(int(np.ceil(data.size / chunk_size)))]
            for result in pool.imap_unordered(increment, args):
                if verbose:
                    print(result)
            return data.copy()
Example #30
def test_message_batch() -> None:
    partition = Partition(Topic("test"), 0)

    with SharedMemoryManager() as smm:
        block = smm.SharedMemory(4096)
        assert block.size == 4096

        message = Message(partition, 0, KafkaPayload(None, b"\x00" * 4000,
                                                     None), datetime.now())

        batch: MessageBatch[KafkaPayload] = MessageBatch(block)
        with assert_changes(lambda: len(batch), 0, 1):
            batch.append(message)

        assert batch[0] == message
        assert list(batch) == [message]

        with assert_does_not_change(lambda: len(batch), 1):
            with pytest.raises(ValueTooLarge):
                batch.append(message)