def setup(self, mem_name, mem_size, sem_name):
    """Create (or attach to) a named POSIX shared-memory segment and semaphore.

    Args:
        mem_name: Name of the shared memory segment (e.g. "/mymem").
        mem_size: Size in bytes used when the segment must be created.
        sem_name: Name of the semaphore guarding the segment.
    """
    self.memory_size = mem_size
    self.memory_name = mem_name
    self.semaphore_name = sem_name
    try:
        # Try to create both objects exclusively first.
        self.memory = posix_ipc.SharedMemory(self.memory_name, posix_ipc.O_CREX,
                                             size=self.memory_size)
        self.semaphore = posix_ipc.Semaphore(self.semaphore_name, posix_ipc.O_CREX)
    except posix_ipc.ExistentialError:
        # If memory already exists, let's connect to it instead.
        # FIX: the original passed posix_ipc.O_RDWR here, but posix_ipc does
        # not define O_RDWR (that would raise AttributeError); attaching uses
        # flags=0.  Passing size= while attaching would also ftruncate() the
        # existing segment, so it is omitted.
        self.memory = posix_ipc.SharedMemory(self.memory_name)
        self.semaphore = posix_ipc.Semaphore(self.semaphore_name)
    self.mapfile = mmap.mmap(self.memory.fd, self.memory.size)
    # Once I've mmapped the file descriptor, I can close it without
    # interfering with the mmap.
    self.memory.close_fd()
    self.semaphore.release()
def _init_shnd(param_arrays, grad_arrays, nd, is_chief):
    """Publish gradient NDArrays through POSIX shared memory so several
    processes can share them.

    NOTE(review): relies on mxnet internals (`_to_shared_mem`,
    `_new_from_shared_mem`); the notes below describe only what this code
    visibly does.
    """
    l = len(grad_arrays)
    # 24 * 2 * l bytes: presumably room for l rows of two integer keys plus
    # padding -- TODO confirm against the readers of this buffer.
    size = 24 * 2 * l
    print(is_chief)
    for i, pair in enumerate(zip(param_arrays, grad_arrays)):
        arg_list, grad_list = pair
        # a/b are the shared-memory key/id pair returned for the first
        # gradient array; c/d are unused here.
        a, b, c, d = grad_list[0]._to_shared_mem()
        # NOTE(review): POSIX shm names normally contain a single leading
        # '/'; '/home/grad' has embedded slashes -- verify this works on the
        # target platform.
        shm = posix_ipc.SharedMemory('/home/grad', posix_ipc.O_CREAT, size=size)
        buf = mmap.mmap(shm.fd, size)
        shape = (l, 2)
        mx_shm_key = np.ndarray(shape, int, buf, order='C')
        if is_chief == 0:
            # Only the chief writes the keys into the shared table.
            mx_shm_key[i][0] = a
            mx_shm_key[i][1] = b
        # Every process reconstructs an NDArray from the shared keys.
        y = nnd._new_from_shared_mem(mx_shm_key[i][0], mx_shm_key[i][1],
                                     grad_list[0].shape, np.float32)
        nd.append(nnd.NDArray(y))
        if is_chief == 0:
            nd[i].zeros_like()
    if is_chief == 0:
        # The chief additionally creates a 12-slot synchronization array
        # and zeroes it.
        size = 12 * 24
        shm = posix_ipc.SharedMemory('/home/sync', posix_ipc.O_CREAT, size=size)
        buf = mmap.mmap(shm.fd, size)
        shape = 12
        sync = np.ndarray(shape, int, buf, order='C')
        for i in range(shape):
            sync[i] = 0
def __init__(self, shape, dtype=np.float64, name=None):
    """Creates a new SharedNDArray.

    If name is left blank, a new POSIX shared memory segment is created
    using a random name.

    Args:
        shape: Shape of the wrapped ndarray.
        dtype: Data type of the wrapped ndarray.
        name: Optional; the filesystem path of the underlying POSIX shared
            memory.

    Returns:
        A new SharedNDArray of the given shape and dtype and backed by the
        given optional name.

    Raises:
        SharedNDArrayError: if an error occurs.
    """
    nbytes = np.dtype(dtype).itemsize * int(np.prod(shape))
    if name:
        # Attach to an existing segment by name.
        self._shm = posix_ipc.SharedMemory(name)
    else:
        # Create a fresh, exclusively owned segment with a random name.
        self._shm = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=nbytes)
    self._buf = mmap.mmap(self._shm.fd, nbytes)
    self.array = np.ndarray(shape, dtype, self._buf, order='C')
def launch_agent(self, args):
    """Launch the agent subprocess and attach to its shared-memory control
    block plus the per-thread statistics regions it exposes.

    Polls up to ~10 seconds (1s intervals) for the agent to create the
    '/lancetcontrol' segment.
    """
    launch_args = [str(args.agent.as_posix())] + shlex.split(" ".join(
        args.agent_args))
    log.debug("Agent launch command: \"{}\"".format(launch_args))
    self.agent = subprocess.Popen(launch_args)
    shm = None
    for i in range(10):
        time.sleep(1)
        try:
            shm = posix_ipc.SharedMemory('/lancetcontrol', 0)
        except posix_ipc.ExistentialError:
            # Agent has not created the control block yet; keep polling.
            continue
        break
    assert shm is not None
    buffer = mmap.mmap(shm.fd, ctypes.sizeof(AgentControlBlock),
                       mmap.MAP_SHARED, mmap.PROT_WRITE)
    self.acb = AgentControlBlock.from_buffer(buffer)
    # Map the stats
    if self.acb.agent_type == 0:
        # agent_type 0: throughput-only statistics per thread.
        for i in range(self.acb.thread_count):
            shm = posix_ipc.SharedMemory('/lancet-stats{}'.format(i), 0)
            buffer = mmap.mmap(shm.fd, ctypes.sizeof(ThroughputStats),
                               mmap.MAP_SHARED, mmap.PROT_WRITE)
            self.thread_stats.append(ThroughputStats.from_buffer(buffer))
    elif (self.acb.agent_type == 1 or self.acb.agent_type == 2
          or self.acb.agent_type == 3):
        # agent_types 1-3: latency statistics per thread.
        for i in range(self.acb.thread_count):
            shm = posix_ipc.SharedMemory('/lancet-stats{}'.format(i), 0)
            buffer = mmap.mmap(shm.fd, ctypes.sizeof(LatencyStats),
                               mmap.MAP_SHARED, mmap.PROT_WRITE)
            self.thread_stats.append(LatencyStats.from_buffer(buffer))
    else:
        # Unknown agent type is a programming error upstream.
        assert False
def init_shared_params(self, job_name, params, param_sync_rule, cleanup=False):
    """
    Initialize shared memory parameters.

    This must be called before accessing the params attribute
    and/or calling :meth:`sync_params`, :meth:`lock_params` or
    :meth:`unlock_params`.

    Parameters
    ----------
    job_name : str
        An identifier.  This must be the same across all Workers that
        share parameters.
    params : shared variables
        Theano shared variables representing the weights of your model.
    param_sync_rule : ParamSyncRule
        Update rule for the parameters
    cleanup : bool
        Whether to cleanup a previous run with the same identifier.  Will
        also copy the current values of `params` to the shared memory.
        This is required on certain platforms due to system restrictions.
    """
    self.update_fn = param_sync_rule.make_update_function(params)
    if cleanup:
        try:
            posix_ipc.unlink_semaphore(job_name + 'lock')
        except posix_ipc.ExistentialError:
            pass
    self.lock = posix_ipc.Semaphore(job_name + 'lock', posix_ipc.O_CREAT,
                                    initial_value=1)
    params_descr = [(numpy.dtype(p.dtype), p.get_value(borrow=True).shape)
                    for p in params]
    params_size = sum(descr_size(*d) for d in params_descr)
    if cleanup:
        try:
            posix_ipc.unlink_shared_memory(job_name + 'params')
        except posix_ipc.ExistentialError:
            pass
    # Create the segment if needed, then close that handle before
    # re-attaching.  FIX: the original rebound self._shmref without closing
    # the creating handle, leaking one file descriptor per call.
    creator = posix_ipc.SharedMemory(job_name + 'params', posix_ipc.O_CREAT,
                                     size=params_size)
    creator.close_fd()
    self._shmref = posix_ipc.SharedMemory(job_name + 'params')
    self._shm = _mmap(fd=self._shmref.fd, length=params_size)
    # The mmap keeps the region alive; the descriptor is no longer needed.
    self._shmref.close_fd()
    self.shared_params = []
    off = 0
    for dtype, shape in params_descr:
        # Zero-copy numpy view into the shared block for each parameter.
        self.shared_params.append(
            numpy.ndarray(shape, dtype=dtype, buffer=self._shm, offset=off))
        off += descr_size(dtype, shape)
def __init__(self, shape, dtype=np.float64, name=None):
    """Expose a POSIX shared-memory segment as a numpy array.

    Attaches to the segment called *name* when one is given; otherwise a
    brand-new, randomly named segment is created, sized for the array.
    """
    n_bytes = np.dtype(dtype).itemsize * int(np.prod(shape))
    if name:
        # Join an existing segment.
        self._shm = posix_ipc.SharedMemory(name)
    else:
        # Exclusively create a fresh anonymous-name segment.
        self._shm = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=n_bytes)
    self._buf = mmap.mmap(self._shm.fd, n_bytes)
    self.array = np.ndarray(shape, dtype, self._buf)
def create_shmem(self, name, size):
    """Create a shared-memory segment and its mmap, recreating the segment
    if a stale one with the same name is already present.

    Returns a (SharedMemory, mmap) pair.
    """
    try:
        memory = ipc.SharedMemory(name, ipc.O_CREX, 0o660, size)
    except ipc.ExistentialError:
        # Leftover from a previous run: destroy it and retry exactly once.
        logger.warning('Deleting shared memory %s', name)
        self.destroy_shmem(*self.open_shmem(name))
        memory = ipc.SharedMemory(name, ipc.O_CREX, 0o660, size)
    mapped = mmap.mmap(memory.fd, memory.size)
    return memory, mapped
def init_shared_params(self, params, param_sync_rule):
    """
    Initialize shared memory parameters.

    This must be called before accessing the params attribute
    and/or calling :meth:`sync_params`.

    Parameters
    ----------
    params : list of :ref:`theano.compile.SharedVariable`
        Theano shared variables representing the weights of your model.
    param_sync_rule : :class:`param_sync.ParamSyncRule`
        Update rule for the parameters
    """
    self.update_fn = param_sync_rule.make_update_function(params)
    self.local_params = params
    params_descr = [(numpy.dtype(p.dtype), p.get_value(borrow=True).shape)
                    for p in params]
    params_size = sum(self._get_descr_size(*d) for d in params_descr)
    shared_mem_name = "{}_params".format(self._job_uid)
    # Acquire lock to decide who will init the shared memory
    self.lock()
    need_init = self.send_req("platoon-need_init")
    if need_init:
        # The ExistentialError is apparently the only way to verify
        # if the shared_memory exists.
        try:
            posix_ipc.unlink_shared_memory(shared_mem_name)
        except posix_ipc.ExistentialError:
            pass
        self._shmref = posix_ipc.SharedMemory(shared_mem_name,
                                              posix_ipc.O_CREAT,
                                              size=params_size)
    else:
        # Another worker already created the segment; just attach to it.
        self._shmref = posix_ipc.SharedMemory(shared_mem_name)
    # NOTE(review): `mmap` here takes fd=/length= keywords, so it is
    # presumably a project-local helper (not stdlib mmap.mmap) -- confirm
    # at the import site.
    self._shm = mmap(fd=self._shmref.fd, length=params_size)
    # The mapping keeps the region alive; the fd is no longer needed.
    self._shmref.close_fd()
    self.shared_params = []
    off = 0
    for dtype, shape in params_descr:
        # Each parameter becomes a zero-copy numpy view into the block.
        self.shared_params.append(
            numpy.ndarray(shape, dtype=dtype, buffer=self._shm, offset=off))
        off += self._get_descr_size(dtype, shape)
    if need_init:
        # Seed the freshly created block with our current values.
        self.copy_to_global(synchronous=False)
    self.unlock()
def shared_mem(self):
    """Create or return already existing shared memory object."""
    try:
        # Attach to an existing segment.
        # NOTE(review): posix_ipc resizes (ftruncate) an existing segment
        # when size= is passed while attaching; here it is resized to the
        # current pickled size of the internal dict -- confirm this resize
        # is intended rather than a plain attach.
        return posix_ipc.SharedMemory(
            self.safe_shm_name,
            size=len(pickle.dumps(self.__internal_dict))
        )
    except posix_ipc.ExistentialError:
        # Segment does not exist yet: create it, one page large.
        return posix_ipc.SharedMemory(
            self.safe_shm_name,
            flags=posix_ipc.O_CREX,
            size=posix_ipc.PAGE_SIZE
        )
def init_shms(self):
    """Build the pool of shared-memory segments backing this object's slots.

    NOTE(review): when self.name is set, every loop iteration attaches to
    the SAME named segment, so all obj_cnt entries alias one region; only
    the anonymous branch creates obj_cnt distinct segments.  Verify this
    aliasing is intended.
    """
    self.shms = []
    size = self.obj_size
    for i in range(self.obj_cnt):
        if self.name:
            _shm = posix_ipc.SharedMemory(self.name)
        else:
            _shm = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=size)
        self.shms.append(_shm)
def __init__(self, tag, size, create=True):
    """Create (create=True) or attach to (create=False) the POSIX
    shared-memory segment named *tag*, and mmap it.

    Args:
        tag: POSIX shared-memory name.
        size: Requested segment size in bytes (when attaching, the
            existing segment's size is used instead).
        create: Whether this object owns (creates) the segment.
    """
    self._mem = None
    self._map = None
    self._owner = create
    self.size = size
    # FIX: sys.maxint was removed in Python 3; sys.maxsize is the
    # equivalent bound and exists on Python 2 as well.
    assert 0 <= size < sys.maxsize
    flag = (0, posix_ipc.O_CREX)[create]
    if create:
        self._mem = posix_ipc.SharedMemory(tag, flags=flag, size=size)
    else:
        # size=0 keeps the existing segment's size untouched.
        self._mem = posix_ipc.SharedMemory(tag, flags=flag, size=0)
    self._map = mmap.mmap(self._mem.fd, self._mem.size)
    # The mmap keeps the region alive; the descriptor can be closed now.
    self._mem.close_fd()
def __init__(self, name=None, obj_size=1, obj_cnt=1):
    """Back obj_cnt objects of obj_size bytes each with POSIX shared
    memory: attach to *name* when given, otherwise create a fresh
    randomly named segment."""
    self.name = name
    self.obj_size = obj_size
    self.obj_cnt = obj_cnt
    total = obj_cnt * obj_size
    self._shm = (posix_ipc.SharedMemory(name) if name else
                 posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=total))
    self._buf = mmap.mmap(self._shm.fd, total)
def __init__(self, name):
    """Map the world-writable "Up"/"Aux" shared-memory regions and open
    the ethernet test socket.

    Note: the *name* parameter is currently unused but kept for interface
    compatibility.
    """
    # Clear the umask so the 0o666 modes below are applied verbatim.
    oldumask = os.umask(0)
    # FIX: the original used the Python 2 octal literal 0666, which is a
    # SyntaxError on Python 3; 0o666 is the same value on both versions.
    self.memUp = posix_ipc.SharedMemory("Up", posix_ipc.O_CREAT,
                                        mode=0o666, size=32768)
    self.memAux = posix_ipc.SharedMemory("Aux", posix_ipc.O_CREAT,
                                         mode=0o666, size=1024)
    os.umask(oldumask)
    self.mfUp = mmap.mmap(self.memUp.fd, self.memUp.size)
    self.mfAux = mmap.mmap(self.memAux.fd, self.memAux.size)
    self.s = eth_test.setup_sock()
    [res, aux] = eth_test.read_mem_buf(self.s)
def init_sizes(self):
    """Expose the per-object size table as an int32 numpy array backed by
    shared memory (attached via self.name when set, otherwise created)."""
    shape = (self.obj_cnt, )
    dtype = np.int32
    n_bytes = np.dtype(dtype).itemsize * int(np.prod(shape))
    if self.name:
        self._shm_size = posix_ipc.SharedMemory(self.name)
    else:
        self._shm_size = posix_ipc.SharedMemory(None, posix_ipc.O_CREX,
                                                size=n_bytes)
    self._buf_size = mmap.mmap(self._shm_size.fd, n_bytes)
    self.sizes = np.ndarray(shape, dtype, self._buf_size, order='C')
def client_run():
    """Ping-pong benchmark client over shared memory.

    Alternates with a server through the "pyflink" segment, handing off
    turns with two semaphores.  The last byte of the buffer is a turn
    marker: 49 (b'1') and 50 (b'2') indicate which message the server has
    finished writing; the client spins until the marker appears.
    """
    memory = posix_ipc.SharedMemory("pyflink")
    client_semaphore = posix_ipc.Semaphore("client")
    server_semaphore = posix_ipc.Semaphore("server")
    map_file = mmap.mmap(memory.fd, memory.size)
    # 1 MiB payload plus the one-byte turn marker.
    size = (1 << 20) + 1
    duration = 10
    end = time.time() + duration
    msgs = 0
    while time.time() < end:
        server_semaphore.acquire()
        map_file.seek(0)
        data = map_file.read(size)
        # Spin until the marker byte becomes b'1' (49).
        while data[-1] != 49:
            map_file.seek(0)
            data = map_file.read(size)
        # print(data[-2:-1])
        client_semaphore.release()
        server_semaphore.acquire()
        map_file.seek(0)
        data = map_file.read(size)
        # Spin until the marker byte becomes b'2' (50).
        while data[-1] != 50:
            map_file.seek(0)
            data = map_file.read(size)
        # print(data[-2:-1])
        client_semaphore.release()
        msgs += 2
    map_file.close()
    print('Received {} messages in {} second(s).'.format(msgs, duration))
def __init__(self, path, size=None):
    """Attach to (or create) the shared-memory segment at *path*, guarded
    by a companion "<path>-sem" semaphore, and mmap it.

    Raises:
        ValueError: if the subclass defines no signature, or if the
            segment cannot be allocated.
    """
    if self.signature is None:
        raise ValueError("No signature for this memory datastructure")
    if size:
        self.size = size
    self.path = path
    # TODO: Investigate having separate read and write locks to allow
    # concurrent reads.
    self.semaphore = posix_ipc.Semaphore(
        self.path + "-sem",
        flags=posix_ipc.O_CREAT,
        mode=0o660,
        initial_value=1,
    )
    try:
        self.shm = posix_ipc.SharedMemory(
            self.path,
            flags=posix_ipc.O_CREAT,
            mode=0o660,
            size=self.size,
        )
    except ValueError as e:
        # FIX: the concatenated message previously rendered as
        # "...segment(potentially..." -- a separator was missing between
        # the two literals.
        raise ValueError("Unable to allocate shared memory segment "
                         "(potentially out of memory).\n"
                         "Error was: %s" % e)
    self.mmap = mmap.mmap(self.shm.fd, self.size)
def test_ftruncate_increase(self):
    """exercise increasing the size of an existing segment from 0 via ftruncate()"""
    mem = posix_ipc.SharedMemory(None, posix_ipc.O_CREX)
    try:
        self.assertEqual(mem.size, 0)
        new_size = _get_block_size()
        os.ftruncate(mem.fd, new_size)
        self.assertEqual(mem.size, new_size)
    finally:
        # FIX: the original leaked both the descriptor and the segment
        # itself; clean up even if an assertion fails.
        mem.close_fd()
        mem.unlink()
def test_ctor_second_handle_size_decrease(self):
    """exercise decreasing the size of an existing segment via a second
    handle to it"""
    halved = self.original_size // 2
    second = posix_ipc.SharedMemory(self.mem.name, size=halved)
    # Both handles must observe the new, smaller size.
    self.assertEqual(second.size, halved)
    self.assertEqual(self.mem.size, halved)
    second.close_fd()
def test_ctor_fd_can_become_zero(self): """test that SharedMemory accepts 0 as valid file descriptor""" # ref: https://github.com/osvenskan/posix_ipc/issues/2 # This test relies on OS compliance with the POSIX spec. Specifically, the spec for # shm_open() says -- # # shm_open() shall return a file descriptor for the shared memory # object that is the lowest numbered file descriptor not currently # open for that process. # # ref: http://pubs.opengroup.org/onlinepubs/009695399/functions/shm_open.html # # So, on systems compliant with that particular part of the spec, if I open a SharedMemory # segment after closing stdin (which has fd == 0), the SharedMemory segment, should be # assigned fd 0. os.close(0) # I have to supply a size here, otherwise the call to close_fd() will fail under macOS. # See here for another report of the same behavior: # https://stackoverflow.com/questions/35371133/close-on-shared-memory-in-osx-causes-invalid-argument-error mem = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=4096) mem_fd = mem.fd # Clean up before attempting the assertion in case the assertion fails. mem.close_fd() mem.unlink() self.assertEqual(mem_fd, 0)
def test_unlink_shared_memory(self):
    """Exercise unlink_shared_memory"""
    mem = posix_ipc.SharedMemory(None, posix_ipc.O_CREX, size=1024)
    mem.close_fd()
    posix_ipc.unlink_shared_memory(mem.name)
    # Once unlinked, re-opening by name must fail.
    with self.assertRaises(posix_ipc.ExistentialError):
        posix_ipc.SharedMemory(mem.name)
def init_global_params(self, params):
    """Allocate (or attach to) the job-wide shared-memory block holding
    the flattened global parameters, exposing each parameter as a
    zero-copy numpy view.  Guarded by self.lock()/self.unlock().
    """
    # FIX: the original used a Python 2 `print` statement, a SyntaxError
    # on Python 3; the function form works on both.
    print('%s init global params' % self.rank)

    def _mmap(length=0, prot=0x3, flags=0x1, fd=0, offset=0):
        # Raw mmap(2) via cffi: prot=0x3 (PROT_READ|PROT_WRITE) and
        # flags=0x1 (MAP_SHARED) so all workers see the same pages.
        _ffi = cffi.FFI()
        _ffi.cdef("void *mmap(void *, size_t, int, int, int, size_t);")
        _lib = _ffi.dlopen(None)
        addr = _ffi.NULL
        m = _lib.mmap(addr, length, prot, flags, fd, offset)
        if m == _ffi.cast('void *', -1):
            raise OSError(_ffi.errno, "for mmap")
        return _ffi.buffer(m, length)

    def _get_descr_size(dtype, shape):
        # Bytes needed to store one parameter array.
        size = dtype.itemsize
        for s in shape:
            size *= s
        return size

    params_descr = [(numpy.dtype(p.dtype), p.get_value(borrow=True).shape)
                    for p in params]
    params_size = sum(_get_descr_size(*d) for d in params_descr)
    shared_mem_name = str(self.job_id)
    self.lock()
    self._shmref = posix_ipc.SharedMemory(
        shared_mem_name, posix_ipc.O_CREAT, size=params_size)
    self._shm = _mmap(fd=self._shmref.fd, length=params_size)
    # The mapping keeps the region alive; the fd is no longer needed.
    self._shmref.close_fd()
    self.global_params = []
    off = 0
    for dtype, shape in params_descr:
        self.global_params.append(numpy.ndarray(shape, dtype=dtype,
                                                buffer=self._shm,
                                                offset=off))
        off += _get_descr_size(dtype, shape)
    self.unlock()
def __init__(self, ip, port):
    """Initialize the flyer controller.

    Sets up ROS publishers/subscribers, an 8 MiB "flyermmap" shared-memory
    region with its semaphore and message queue, and launches the
    flyerws.py websocket subprocess bound to *ip*:*port*.
    """
    # global flyermmap
    # Print posix_ipc capabilities for diagnostics.
    print(posix_ipc.MESSAGE_QUEUES_SUPPORTED)
    print(posix_ipc.QUEUE_MESSAGES_MAX_DEFAULT)
    print(posix_ipc.QUEUE_MESSAGE_SIZE_MAX_DEFAULT)
    self.model_pub = rospy.Publisher("/gazebo/set_model_state", ModelState, queue_size=1)
    self.bridge = CvBridge()
    # self.image_sub = rospy.Subscriber("/mycam/image_raw",Image,self.callback)
    self.image_sub = rospy.Subscriber("/multisense/camera/left/image_raw", Image, self.callback)
    self.imageflag = 0
    self.imagecnt = 0
    self.key = 0
    # Initial step pair: right foot leading, left foot standing.
    firstFoot = Foot([0, -0.1, 0], Foot.RIGHT)
    standingFoot = Foot([0, 0.1, 0], Foot.LEFT)
    self.list = Steplist(firstFoot, standingFoot)
    self.height = 1.6
    self.tilt = 0.0
    memory = posix_ipc.SharedMemory("flyermmap", posix_ipc.O_CREAT, size=8 * Mib)
    self.sem = posix_ipc.Semaphore("flyersem", posix_ipc.O_CREAT)
    self.memmap = mmap.mmap(memory.fd, memory.size)
    # flyermmap = self.memmap
    # The mmap keeps the region alive; the descriptor can be closed now.
    memory.close_fd()
    self.queue = posix_ipc.MessageQueue("/flyerqueue", posix_ipc.O_CREAT)
    # self.wsproc = prc.Popen('python flyerws.py', shell=True )
    self.wsproc = prc.Popen('python -u flyerws.py --ip %s --port %s' % (ip, port), shell=True)
    self.writecnt = 0
    self.loc = (0, 0, 0)
    self.walker = Walker()
    self.walker.init()
def from_array(cls, array: np.ndarray):
    """
    Put numpy array into shared memory in order to read it in another process.

    @param array: Source array copied into a new shared-memory segment.
    @type np.ndarray:
    @return: Return SharedArray object that contain numpy array.
    @rtype: SharedArray
    @raise ValueError: if *array* is not a numpy ndarray.
    """
    # Guard clause with isinstance (also accepts ndarray subclasses)
    # instead of the original exact `type(...) is np.ndarray` test.
    if not isinstance(array, np.ndarray):
        raise ValueError("array should be of type np.ndarray "
                         "instead of {}".format(type(array)))
    # Random segment name; the uuid tail keeps it short.
    shared_mem_uuid = str(uuid.uuid4())[16:]
    shared_memory = posix_ipc.SharedMemory(name=shared_mem_uuid,
                                           flags=posix_ipc.O_CREX,
                                           size=array.nbytes,
                                           read_only=False)
    memory_buffer = mmap.mmap(shared_memory.fd, shared_memory.size)
    memory_buffer.write(array.data)
    # Re-expose the shared buffer as an ndarray of the original shape/dtype.
    data = np.ndarray(buffer=memory_buffer, dtype=array.dtype,
                      shape=array.shape)
    return cls(data=data, shared_memory=shared_memory,
               memory_buffer=memory_buffer)
def bringup(self):
    """Attach to the already-created shared-memory segment and mmap it.

    NOTE(review): passing size= while attaching makes posix_ipc
    ftruncate() the segment to self.size -- presumably intentional, but
    confirm the creator uses the same size.
    """
    self.memory = posix_ipc.SharedMemory(self.name, 0, size=self.size)
    self.mapfile = mmap.mmap(self.memory.fd, self.memory.size)
    # Once I've mmapped the file descriptor, I can close it without
    # interfering with the mmap.
    self.memory.close_fd()
def __del__(self):
    """Best-effort cleanup: close our handles and unlink the segment."""
    try:
        self.close()
        posix_ipc.SharedMemory(self.name).unlink()
    except posix_ipc.ExistentialError:
        # Segment already gone (e.g. removed by another owner) -- fine.
        pass
def _put_array_in_shm(arr, shm_objs, mmap_objs):
    """Copy *arr* into a fresh named shared-memory region (as big-endian
    float64) and return "<name> <nbytes>" for the consumer.

    The created SHM/mmap handles are appended to shm_objs/mmap_objs so the
    caller can release them later.  Empty arrays short-circuit to
    "<EMPTY> 0" without allocating anything.
    """
    if arr.size == 0:
        return "<EMPTY> 0"
    global vector_data_counter
    # the name only needs to start with a / on POSIX, but Windows doesn't
    # seem to mind, so just putting it there always...
    name = "/vecdata-" + str(os.getpid()) + "-" + str(vector_data_counter)
    vector_data_counter += 1
    system = platform.system()
    if system in ['Linux', 'Darwin']:
        mem = posix_ipc.SharedMemory(name, posix_ipc.O_CREAT | posix_ipc.O_EXCL,
                                     size=arr.nbytes)
        shm_objs.append(mem)
        if system == 'Darwin':
            # for some reason we can't write to the shm fd directly on mac,
            # only after mmap-ing it
            with mmap.mmap(mem.fd, length=mem.size) as mf:
                mf.write(arr.astype(np.dtype('>f8')).tobytes())
        else:
            with open(mem.fd, 'wb') as mf:
                arr.astype(np.dtype('>f8')).tofile(mf)
    elif system == 'Windows':
        # on Windows, the mmap module in itself provides shared memory
        # functionality
        mm = mmap.mmap(-1, arr.nbytes, tagname=name)
        mmap_objs.append(mm)
        mm.write(arr.astype(np.dtype('>f8')).tobytes())
    else:
        raise RuntimeError("unsupported platform")
    return name + " " + str(arr.nbytes)
def _load_pickle_from_shm(name_and_size: str):
    """
    Internal.

    Opens a shared memory object (region, file, content) in a platform-specific way,
    unpickles its whole content, and returns the loaded object.

    `name_and_size` should be a space-separated pair of an object name and an integer,
    which is the size of the named SHM object in bytes.
    """
    if not name_and_size:
        return None
    name, size = name_and_size.split(" ")
    size = int(size)
    if name == "<EMPTY>" and size == 0:
        # Sentinel emitted by the producer for empty payloads.
        return None
    system = platform.system()
    if system in ['Linux', 'Darwin']:
        mem = posix_ipc.SharedMemory(name)
        try:
            with mmap.mmap(mem.fd, mem.size) as mf:
                # Byte 0 is a "consumed" flag for the producer; the pickled
                # payload starts at offset 8.
                mf.write_byte(1)
                mf.seek(8)
                p = pickle.load(mf)
        finally:
            # FIX: the original never closed this descriptor (fd leak);
            # the mmap holds its own reference, so closing here is safe.
            mem.close_fd()
    elif system == 'Windows':
        # on Windows, the mmap module in itself provides shared memory functionality
        with mmap.mmap(-1, size, tagname=name) as mf:
            mf.write_byte(1)
            mf.seek(8)
            p = pickle.load(mf)
    else:
        raise RuntimeError("unsupported platform")
    return p
def create(self, shmem_suffix, size, needs_create):
    """ Creates all IPC structures. """
    self.shmem_name = _shmem_pattern.format(shmem_suffix)
    self.size = size

    # Create or read share memory
    logger.debug('%s shmem `%s` (%s %s)',
                 'Creating' if needs_create else 'Opening',
                 self.shmem_name, self.size, self.key_name)

    try:
        self._mem = ipc.SharedMemory(self.shmem_name,
                                     ipc.O_CREAT if needs_create else 0,
                                     size=self.size)
    except ipc.ExistentialError:
        # Surface the full traceback text in the error for operators.
        raise ValueError('Could not create shmem `{}` ({}), e:`{}`'.format(
            self.shmem_name, self.key_name, format_exc()))

    # Map memory to mmap
    # NOTE(review): `mmap` here is called with (fd, size) positionally --
    # presumably the stdlib mmap.mmap or a compatible wrapper; confirm at
    # the import site.
    self._mmap = mmap(self._mem.fd, self.size)

    # Write initial data so that JSON .loads always succeeds
    self.store_initial()

    self.running = True
def ndarray_shm(shape, dtype, location, readonly=False, order='F', **kwargs):
    """Create a shared memory numpy array. Requires /dev/shm to exist.

    Returns (mmap buffer, ndarray view).  When readonly=True the segment
    must already exist with exactly the requested size.
    """
    import posix_ipc
    from posix_ipc import O_CREAT
    import psutil

    nbytes = Vec(*shape).rectVolume() * np.dtype(dtype).itemsize
    available = psutil.virtual_memory().available

    preexisting = 0
    # This might only work on Ubuntu
    shmloc = os.path.join(SHM_DIRECTORY, location)
    if os.path.exists(shmloc):
        preexisting = os.path.getsize(shmloc)
    elif readonly:
        raise SharedMemoryReadError(shmloc + " has not been allocated. Requested " + str(nbytes) + " bytes.")

    if readonly and preexisting != nbytes:
        raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
            shmloc, preexisting, nbytes
        ))

    if (nbytes - preexisting) > available:
        overallocated = nbytes - preexisting - available
        overpercent = (100 * overallocated / (preexisting + available))
        raise SharedMemoryAllocationError("""
    Requested more memory than is available.

    Shared Memory Location:  {}

    Shape:                   {}
    Requested Bytes:         {}

    Available Bytes:         {}
    Preexisting Bytes*:      {}

    Overallocated Bytes*:    {} (+{:.2f}%)

    * Preexisting is only correct on linux systems that support /dev/shm/""" \
            .format(location, shape, nbytes, available, preexisting,
                    overallocated, overpercent))

    # This might seem like we're being "extra safe" but consider
    # a threading condition where the condition of the shared memory
    # was adjusted between the check above and now. Better to make sure
    # that we don't accidently change anything if readonly is set.
    flags = 0 if readonly else O_CREAT
    size = 0 if readonly else int(nbytes)

    try:
        shared = posix_ipc.SharedMemory(location, flags=flags, size=size)
        array_like = mmap.mmap(shared.fd, shared.size)
        # The mmap holds the mapping; the raw descriptor can be closed.
        os.close(shared.fd)
        renderbuffer = np.ndarray(buffer=array_like, dtype=dtype,
                                  shape=shape, order=order, **kwargs)
    except OSError as err:
        if err.errno == errno.ENOMEM:  # Out of Memory
            # Don't leave a half-allocated segment behind.
            posix_ipc.unlink_shared_memory(location)
        raise

    renderbuffer.setflags(write=(not readonly))
    return array_like, renderbuffer
def test_object_method_close_fd(self):
    """test that SharedMemory.close_fd() closes the file descriptor"""
    handle = posix_ipc.SharedMemory(self.mem.name)
    handle.close_fd()
    # fdopen on a closed descriptor must raise OSError.
    with self.assertRaises(OSError):
        os.fdopen(handle.fd)