def _connect(self):
    ''' open an existing shared-memory segment and its companion lock '''
    LOG.info('shmem %s: attempting connect', self._name)
    region = SharedMemory(self._name)
    # Map the entire segment; the file descriptor is not needed once mapped.
    self._mmap = mmap.mmap(region.fd, region.size)
    region.close_fd()
    # The named semaphore must already exist (created by the segment's creator).
    self._lock = Semaphore('/pyspace_%s_lock' % self._name)
    LOG.info('shmem %s: connect succeeded', self._name)
def __init__(self, name):
    """Connect to (or create) a 4-byte float shared-memory value named *name*.

    Also creates/claims a named semaphore of the same name that guards
    reads and writes of the value.
    """
    # Initialize variables for memory regions, buffers and the Semaphore.
    self.shm_buf = None
    self.shm_region = None
    self.value_lock = None
    self.shm_name = name
    self.value_lock_name = name
    # Attach to an existing shared-memory region, or create it on first use.
    try:
        self.shm_region = SharedMemory(self.shm_name)
        self.shm_buf = mmap.mmap(self.shm_region.fd, sizeof(c_float))
        self.shm_region.close_fd()
    except ExistentialError:
        # Region does not exist yet: create it sized for one c_float.
        self.shm_region = SharedMemory(self.shm_name, O_CREAT, size=sizeof(c_float))
        self.shm_buf = mmap.mmap(self.shm_region.fd, self.shm_region.size)
        self.shm_region.close_fd()
    # Create the semaphore exclusively; if it already exists, unlink the
    # stale one and recreate it so its count starts from a known state.
    # NOTE(review): if another live process still holds the old semaphore,
    # this unlink/recreate resets the lock under it — verify single-owner use.
    try:
        self.value_lock = Semaphore(self.value_lock_name, O_CREX)
    except ExistentialError:
        value_lock = Semaphore(self.value_lock_name, O_CREAT)
        value_lock.unlink()
        self.value_lock = Semaphore(self.value_lock_name, O_CREX)
    # Release once so the first acquire() does not block (count 0 -> 1).
    self.value_lock.release()
def __init__(self, name):
    """Connect to (or create) the '<name>-meta' metadata region and the
    named semaphore that guards access to the shared image."""
    # Initialize variables for memory regions, buffers and the Semaphore.
    self.shm_buf = None
    self.shm_region = None
    self.md_buf = None
    self.md_region = None
    self.image_lock = None
    self.shm_name = name
    self.md_name = name + "-meta"
    self.image_lock_name = name
    # Attach to the metadata region if it exists, otherwise create it
    # sized to hold one MD structure.
    try:
        self.md_region = SharedMemory(self.md_name)
        self.md_buf = mmap.mmap(self.md_region.fd, sizeof(MD))
        self.md_region.close_fd()
    except ExistentialError:
        self.md_region = SharedMemory(self.md_name, O_CREAT, size=sizeof(MD))
        self.md_buf = mmap.mmap(self.md_region.fd, self.md_region.size)
        self.md_region.close_fd()
    # Create the semaphore exclusively; if one already exists, unlink the
    # stale one and recreate so the count starts from a known state.
    # NOTE(review): resets the lock even if another process holds it — verify.
    try:
        self.image_lock = Semaphore(self.image_lock_name, O_CREX)
    except ExistentialError:
        image_lock = Semaphore(self.image_lock_name, O_CREAT)
        image_lock.unlink()
        self.image_lock = Semaphore(self.image_lock_name, O_CREX)
    # Release once so the first acquire() does not block (count 0 -> 1).
    self.image_lock.release()
def __init__(self, name):
    """Attach to the metadata segment published by the writer.

    Polls once per second until the '<name>-meta' shared-memory segment
    exists, maps it, and opens the named semaphore guarding the data.
    """
    self.shm_buf = None
    self.md_buf = None
    while not self.md_buf:
        try:
            print("Waiting for Block Input...")
            md_region = SharedMemory(name + '-meta')
            self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
            md_region.close_fd()
            # Mapped successfully: proceed immediately.  The old code
            # slept a full second here too, delaying startup for nothing.
        except ExistentialError:
            # Writer has not created the segment yet; retry after a pause.
            sleep(1)
    self.shm_name = name
    self.sem = Semaphore(name, 0)
def __init__(self, name):
    """Attach to the '<name>-meta' metadata segment, waiting until it exists."""
    import time  # local import: only needed for the retry back-off below
    self.shm_buf = None
    self.md_buf = None
    while not self.md_buf:
        try:
            md_region = SharedMemory(name + "-meta")
            self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
            md_region.close_fd()
        except ExistentialError:
            # Back off briefly instead of busy-spinning at 100% CPU while
            # waiting for the writer to create the segment (the previous
            # 'pass' made this loop a hot spin).
            time.sleep(0.1)
    self.shm_name = name
    self.sem = Semaphore(name, 0)
def __init__(self, name):
    """Reader side: wait for the writer's metadata segment, then map it.

    Polls once per second for the '<name>-meta' segment, maps it, and
    opens the named semaphore (initial value 0) guarding frame data.
    """
    self.shm_buf = None
    self.md_buf = None
    logging.info("Reader launched")
    while not self.md_buf:
        try:
            logging.warning(
                "Waiting for MetaData shared memory is available.")
            md_region = SharedMemory(name + '-meta')
            self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
            md_region.close_fd()
            # Success: do not sleep here.  The old unconditional sleep
            # added a one-second delay after the map already succeeded.
        except ExistentialError:
            sleep(1)
    self.shm_name = name
    self.sem = Semaphore(name, 0)
def lock(self, on_create: Callable = default_on_create):
    """Atomically check for, and create on first use, the marker file.

    A named POSIX semaphore derived from ``self.path`` serializes the
    exists-then-create sequence across processes.

    Returns:
        bool: True if the marker file already existed; False if this call
        created it (in which case ``on_create(self.path)`` was invoked).
    """
    # 'digest' rather than 'hash' — avoid shadowing the builtin.
    digest = hashlib.sha1(self.path.encode()).hexdigest()
    with Semaphore(f"/{digest}-lock", flags=O_CREAT, initial_value=1):
        existed = os.path.exists(self._file)
        if not existed:
            with open(self._file, "w", encoding="utf-8") as f:
                f.write("1")
            on_create(self.path)
        return existed
def _create(self):
    ''' attempt to create and initialize the shared memory

    O_CREX makes creation exclusive: exactly one process wins the race to
    create the segment and semaphore; losers get ExistentialError and are
    expected to fall back to connecting instead.
    '''
    LOG.info('shmem %s: attempting create shmem', self._name)
    shmem = SharedMemory(self._name, size=SHMEM_SIZE, flags=O_CREX)
    LOG.info('shmem %s: attempting create mmap', self._name)
    self._mmap = mmap.mmap(shmem.fd, shmem.size)
    shmem.close_fd()
    LOG.info('shmem %s: attempting create semaphore', self._name)
    # The semaphore starts at count 0; it is released below only after
    # initialization succeeds, so other clients block until then.
    self._lock = Semaphore('/pyspace_%s_lock' % self._name, flags=O_CREX)
    LOG.info('shmem %s: create succeeded', self._name)
    try:
        self._initialize()
        self._lock.release()
    # Bare except is deliberate here: even on KeyboardInterrupt the
    # half-initialized segment and semaphore must be unlinked, and the
    # exception is re-raised unconditionally afterwards.
    except:
        LOG.exception('shmem %s: initialize failed; attempting unlink', self._name)
        shmem.unlink()
        self._lock.unlink()
        raise
class ShmRead:
    """Reader for frames published through POSIX shared memory.

    Protocol (as visible here): a '<name>-meta' segment holds an MD
    structure describing the frame (shape_0/1/2 and byte size); a second
    segment named '<name>' holds the raw uint8 pixel data; a named
    semaphore '<name>' guards both.
    """

    def __init__(self, name):
        # Spin until the writer has created the metadata segment.
        self.shm_buf = None
        self.md_buf = None
        while not self.md_buf:
            try:
                # print("Waiting for MetaData shared memory is available.")
                md_region = SharedMemory(name + "-meta")
                self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
                md_region.close_fd()
                # sleep(1)
            except ExistentialError:
                # NOTE(review): 'pass' makes this a hot busy-wait; the
                # commented-out sleep suggests a back-off was intended.
                pass
                # sleep(1)
        self.shm_name = name
        self.sem = Semaphore(name, 0)

    def get(self):
        """Return the current frame as a numpy uint8 ndarray view over the
        shared data buffer (no copy is made)."""
        md = MD()
        # Snapshot the metadata under the semaphore.
        self.sem.acquire()
        # NOTE(review): 'md_buf' is a bare name, presumably a module-level
        # ctypes buffer shared with the writer — verify it is defined there.
        md_buf[:] = self.md_buf
        memmove(addressof(md), md_buf, sizeof(md))
        self.sem.release()
        # Lazily map the data segment once its size is known from metadata.
        while not self.shm_buf:
            try:
                # print("Waiting for Data shared memory is available.")
                shm_region = SharedMemory(name=self.shm_name)
                self.shm_buf = mmap.mmap(shm_region.fd, md.size)
                shm_region.close_fd()
                # sleep(1)
            except ExistentialError:
                pass
                # sleep(1)
        self.sem.acquire()
        f = np.ndarray(
            shape=(md.shape_0, md.shape_1, md.shape_2),
            dtype="uint8",
            buffer=self.shm_buf,
        )
        self.sem.release()
        return f

    def release(self):
        # Close our mappings; the writer owns unlinking the segments.
        self.md_buf.close()
        self.shm_buf.close()
class SharedMemoryFrameReader:
    """Reader side of a shared-memory frame channel.

    Waits for the writer's '<name>-meta' MD segment, then serves frames as
    numpy views over the '<name>' data segment, all guarded by a named
    semaphore '<name>'.
    """

    def __init__(self, name):
        self.shm_buf = None
        self.md_buf = None
        logging.info("Reader launched")
        # Poll once per second until the writer creates the metadata segment.
        while not self.md_buf:
            try:
                logging.warning(
                    "Waiting for MetaData shared memory is available.")
                md_region = SharedMemory(name + '-meta')
                self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
                md_region.close_fd()
                # NOTE(review): this sleep also runs after a successful map,
                # adding a one-second startup delay.
                sleep(1)
            except ExistentialError:
                sleep(1)
        self.shm_name = name
        self.sem = Semaphore(name, 0)

    def get(self):
        """Return the current frame as a zero-copy numpy uint8 view."""
        md = MD()
        # Snapshot metadata under the semaphore.
        self.sem.acquire()
        # NOTE(review): 'md_buf' is a bare name, presumably a module-level
        # ctypes buffer shared with the writer — verify it exists there.
        md_buf[:] = self.md_buf
        memmove(addressof(md), md_buf, sizeof(md))
        self.sem.release()
        # Lazily map the data segment once its size is known.
        while not self.shm_buf:
            try:
                logging.warning("Waiting for Data shared memory is available.")
                shm_region = SharedMemory(name=self.shm_name)
                self.shm_buf = mmap.mmap(shm_region.fd, md.size)
                shm_region.close_fd()
                sleep(1)
            except ExistentialError:
                sleep(1)
        self.sem.acquire()
        f = np.ndarray(shape=(md.shape_0, md.shape_1, md.shape_2),
                       dtype='uint8',
                       buffer=self.shm_buf)
        self.sem.release()
        return f

    def release(self):
        # Close our mappings; the writer is responsible for unlinking.
        self.md_buf.close()
        self.shm_buf.close()
        logging.info("Reader terminated")
class Wire_Read:
    """Reader end of a shared-memory 'wire' carrying uint8 ndarray blocks.

    Metadata (MD: shape_0/1/2, size) lives in '<name>-meta'; raw data in
    '<name>'; a named semaphore '<name>' guards both.
    """

    def __init__(self, name):
        self.shm_buf = None
        self.md_buf = None
        # Poll once per second until the writer creates the metadata segment.
        while not self.md_buf:
            try:
                print("Waiting for Block Input...")
                md_region = SharedMemory(name + '-meta')
                self.md_buf = mmap.mmap(md_region.fd, sizeof(MD))
                md_region.close_fd()
                # NOTE(review): this sleep also fires after a successful
                # map, adding a one-second startup delay.
                sleep(1)
            except ExistentialError:
                sleep(1)
        self.shm_name = name
        self.sem = Semaphore(name, 0)

    def get(self):
        """Return the current block as a zero-copy numpy uint8 view."""
        md = MD()
        # Snapshot the metadata under the semaphore.
        self.sem.acquire()
        # NOTE(review): 'md_buf' is a bare name, presumably a module-level
        # ctypes buffer shared with the writer — verify it exists there.
        md_buf[:] = self.md_buf
        memmove(addressof(md), md_buf, sizeof(md))
        self.sem.release()
        # Lazily map the data segment once its size is known from metadata.
        while not self.shm_buf:
            try:
                print("Waiting for Data...")
                shm_region = SharedMemory(name=self.shm_name)
                self.shm_buf = mmap.mmap(shm_region.fd, md.size)
                shm_region.close_fd()
                sleep(1)
            except ExistentialError:
                sleep(1)
        self.sem.acquire()
        f = np.ndarray(shape=(md.shape_0, md.shape_1, md.shape_2),
                       dtype='uint8',
                       buffer=self.shm_buf)
        self.sem.release()
        return f

    def release(self):
        # Close our mappings; unlinking is the writer's job.
        self.md_buf.close()
        self.shm_buf.close()
def setup_email():
    """Prepare the on-disk test email directory for this test suite.

    Ensures ``Email.TEST_DIR`` exists and removes leftover email files
    from previous runs of this suite.  A named POSIX semaphore serializes
    the whole setup across concurrently launched test processes.
    """
    with Semaphore("/test-emails", flags=O_CREAT, initial_value=1):
        # exist_ok=True removes the exists()/makedirs() race the old TODO
        # complained about: another process could create the directory
        # between the check and the call.
        os.makedirs(Email.TEST_DIR, exist_ok=True)
        # Cleanup all email files for this test suite.
        prefix = Email.email_prefix(suite=True)
        for name in os.listdir(Email.TEST_DIR):
            if name.startswith(prefix):
                os.remove(os.path.join(Email.TEST_DIR, name))
def __init__(self, name):
    """Writer side: create the '<name>-meta' metadata segment and claim the
    named semaphore '<name>' that guards the frame data."""
    self.shm_region = None
    logging.info("Writer launched")
    # Create (or reopen) the metadata region sized for one MD structure.
    self.md_region = SharedMemory(name + '-meta', O_CREAT, size=sizeof(MD))
    self.md_buf = mmap.mmap(self.md_region.fd, self.md_region.size)
    self.md_region.close_fd()
    # The data region is created lazily, once the first frame's size is known.
    self.shm_buf = None
    self.shm_name = name
    # Monotonic frame counter embedded in the metadata.
    self.count = 0
    # Create the semaphore exclusively; if a stale one survives from a
    # previous run, unlink it and recreate so the count starts from zero.
    try:
        self.sem = Semaphore(name, O_CREX)
    except ExistentialError:
        sem = Semaphore(name, O_CREAT)
        sem.unlink()
        self.sem = Semaphore(name, O_CREX)
    # Release once so the first acquire() does not block (count 0 -> 1).
    self.sem.release()
def pi(n):
    """Estimate pi with n terms split across 10 forked worker processes.

    Each child computes its slice via ``calc_slice`` and adds it into an
    8-byte double held in POSIX shared memory, serialized by a named
    semaphore.  The parent reaps the children, reads the accumulated sum
    and returns ``sqrt(sum * 8)``.
    """
    pids = []
    unit = n / 10
    # O_CREX: fail loudly if a stale segment/semaphore survives a crash.
    sem_lock = Semaphore('/pi_sem_lock', flags=posix_ipc.O_CREX, initial_value=1)
    memory = Memory('/pi_rw', size=8, flags=posix_ipc.O_CREX)
    # Seed the shared accumulator with 0.0.
    os.lseek(memory.fd, 0, os.SEEK_SET)
    os.write(memory.fd, struct.pack('d', 0.0))
    for i in range(10):
        mink = unit * i
        maxk = mink + unit
        pid = os.fork()
        if pid > 0:
            pids.append(pid)
        else:
            # Child: compute this slice and fold it into shared memory.
            try:
                s = calc_slice(mink, maxk)
                sem_lock.acquire()
                try:
                    os.lseek(memory.fd, 0, os.SEEK_SET)
                    bs = os.read(memory.fd, 8)
                    cur_val, = struct.unpack('d', bs)
                    cur_val += s
                    # Serialize the updated total back into the segment.
                    bs = struct.pack('d', cur_val)
                    os.lseek(memory.fd, 0, os.SEEK_SET)
                    os.write(memory.fd, bs)
                    memory.close_fd()
                finally:
                    sem_lock.release()
            finally:
                # os._exit guarantees the child never falls back into the
                # parent's fork loop (the old sys.exit could be skipped if
                # calc_slice raised) and skips inherited atexit handlers.
                os._exit(0)
    for pid in pids:
        os.waitpid(pid, 0)
    os.lseek(memory.fd, 0, os.SEEK_SET)
    bs = os.read(memory.fd, 8)
    total, = struct.unpack('d', bs)
    memory.close_fd()
    memory.unlink()
    sem_lock.unlink()
    return math.sqrt(total * 8)
class ValueEvent(object):
    """A one-shot, cross-thread value handoff, similar to threading.Event.

    Unlike threading.Event, a value can be attached when the event is
    signalled.  Exactly one thread writes the value and signals; exactly
    one other thread waits for it and reads.  As long as the writer uses
    set() and the reader uses get(), the two accesses never overlap.

    A posix_ipc.Semaphore is used instead of threading.Event because
    Event.wait(timeout) introduces unacceptable delays:
    http://stackoverflow.com/questions/21779183/python-eventwait-with-timeout-gives-delay
    """

    def __init__(self, name=None):
        self._payload = None
        # O_CREX: each event owns a brand-new named semaphore.
        self._sem = Semaphore(name, flags=O_CREX)

    def __del__(self):
        # The OS never reclaims a named semaphore on its own — not even
        # when the program crashes — so unlink it explicitly here.  We may
        # want a manager process, or well-known names that can be
        # reclaimed on restart, instead of relying on __del__.
        self._sem.unlink()

    def get(self, timeout):
        """Block until set() fires or *timeout* elapses, then return the value.

        Raises ValueEventTimeout if the timeout expires first.
        """
        try:
            self._sem.acquire(timeout)
        except BusyError:
            raise ValueEventTimeout(timeout)
        return self._payload

    def set(self, value):
        """Store *value* and wake the waiting reader."""
        self._payload = value
        self._sem.release()
class SharedMemoryFrameWriter:
    """Writer side of a shared-memory frame channel.

    Publishes MD metadata (shape, byte size, frame counter) into
    '<name>-meta' and raw frame bytes into '<name>', guarded by a named
    semaphore '<name>'.
    """

    def __init__(self, name):
        self.shm_region = None
        logging.info("Writer launched")
        # Create (or reopen) the metadata region sized for one MD structure.
        self.md_region = SharedMemory(name + '-meta', O_CREAT, size=sizeof(MD))
        self.md_buf = mmap.mmap(self.md_region.fd, self.md_region.size)
        self.md_region.close_fd()
        # Data region is created lazily when the first frame's size is known.
        self.shm_buf = None
        self.shm_name = name
        # Monotonic frame counter embedded in the metadata.
        self.count = 0
        # Create the semaphore exclusively; unlink/recreate a stale one.
        try:
            self.sem = Semaphore(name, O_CREX)
        except ExistentialError:
            sem = Semaphore(name, O_CREAT)
            sem.unlink()
            self.sem = Semaphore(name, O_CREX)
        # Release once so the first acquire() does not block (count 0 -> 1).
        self.sem.release()

    def add(self, frame: np.ndarray):
        """Publish *frame* (uint8 ndarray) and its metadata atomically."""
        byte_size = frame.nbytes
        if not self.shm_region:
            # First frame: size the data region to this frame's byte count.
            # NOTE(review): later frames are assumed to have the same size.
            self.shm_region = SharedMemory(self.shm_name, O_CREAT, size=byte_size)
            self.shm_buf = mmap.mmap(self.shm_region.fd, byte_size)
            self.shm_region.close_fd()
        self.count += 1
        md = MD(frame.shape[0], frame.shape[1], frame.shape[2], byte_size, self.count)
        self.sem.acquire()
        # NOTE(review): 'md_buf' is a bare name, presumably a module-level
        # ctypes buffer shared with the reader — verify it is defined there.
        memmove(md_buf, addressof(md), sizeof(md))
        self.md_buf[:] = bytes(md_buf)
        self.shm_buf[:] = frame.tobytes()
        self.sem.release()

    def release(self):
        """Tear down the channel: close mappings and unlink both segments."""
        self.sem.acquire()
        self.md_buf.close()
        unlink_shared_memory(self.shm_name + '-meta')
        self.shm_buf.close()
        unlink_shared_memory(self.shm_name)
        self.sem.release()
        self.sem.close()
        logging.info("Writer terminated")
class SharedImage:
    """Bidirectional shared-memory image slot.

    MD metadata (shape_0/1/2, byte size) lives in '<name>-meta', raw uint8
    pixels in '<name>'; a named semaphore '<name>' guards both.  Either
    side may call get()/add().
    """

    def __init__(self, name):
        # Initialize variables for memory regions, buffers and the Semaphore.
        self.shm_buf = None
        self.shm_region = None
        self.md_buf = None
        self.md_region = None
        self.image_lock = None
        self.shm_name = name
        self.md_name = name + "-meta"
        self.image_lock_name = name
        # Attach to the metadata region if it exists, otherwise create it.
        try:
            self.md_region = SharedMemory(self.md_name)
            self.md_buf = mmap.mmap(self.md_region.fd, sizeof(MD))
            self.md_region.close_fd()
        except ExistentialError:
            self.md_region = SharedMemory(self.md_name, O_CREAT, size=sizeof(MD))
            self.md_buf = mmap.mmap(self.md_region.fd, self.md_region.size)
            self.md_region.close_fd()
        # Create the semaphore exclusively; unlink/recreate a stale one.
        # NOTE(review): resets the lock even if another process holds it.
        try:
            self.image_lock = Semaphore(self.image_lock_name, O_CREX)
        except ExistentialError:
            image_lock = Semaphore(self.image_lock_name, O_CREAT)
            image_lock.unlink()
            self.image_lock = Semaphore(self.image_lock_name, O_CREX)
        # Release once so the first acquire() does not block (count 0 -> 1).
        self.image_lock.release()

    # Get the shared image
    def get(self):
        """Return the shared image as a numpy uint8 view, or a 3x3x3 zero
        image if the data segment does not exist (or is empty)."""
        # Define metadata
        metadata = MD()
        # Snapshot the metadata from the shared region under the lock.
        self.image_lock.acquire()
        # NOTE(review): 'md_buf' is a bare name, presumably a module-level
        # ctypes buffer shared with add() — verify it is defined there.
        md_buf[:] = self.md_buf
        memmove(addressof(metadata), md_buf, sizeof(metadata))
        self.image_lock.release()
        # Try to retrieve the image from the data segment;
        # otherwise fall back to a zero image.
        try:
            self.shm_region = SharedMemory(self.shm_name)
            self.shm_buf = mmap.mmap(self.shm_region.fd, metadata.size)
            self.shm_region.close_fd()
            self.image_lock.acquire()
            image = np.ndarray(shape=(metadata.shape_0, metadata.shape_1, metadata.shape_2),
                               dtype='uint8', buffer=self.shm_buf)
            self.image_lock.release()
            # Guard against an empty (all-zero-shape) image.
            if (image.size == 0):
                image = np.zeros((3, 3, 3), np.uint8)
        except ExistentialError:
            image = np.zeros((3, 3, 3), np.uint8)
        return image

    # Add the shared image
    def add(self, image):
        """Publish *image* (uint8 ndarray) and its metadata atomically."""
        # Get byte size of the image
        byte_size = image.nbytes
        if not self.shm_region:
            # First image: size the data region to this image's byte count.
            # NOTE(review): later images are assumed to be the same size.
            self.shm_region = SharedMemory(self.shm_name, O_CREAT, size=byte_size)
            self.shm_buf = mmap.mmap(self.shm_region.fd, byte_size)
            self.shm_region.close_fd()
        # Generate meta data
        metadata = MD(image.shape[0], image.shape[1], image.shape[2], byte_size)
        # Send the meta data and image to the shared regions under the lock.
        self.image_lock.acquire()
        memmove(md_buf, addressof(metadata), sizeof(metadata))
        self.md_buf[:] = md_buf[:]
        self.shm_buf[:] = image.tobytes()
        self.image_lock.release()

    # Destructor function to unlink and disconnect
    def close(self):
        """Close the metadata mapping and unlink both shared segments."""
        self.image_lock.acquire()
        self.md_buf.close()
        try:
            unlink_shared_memory(self.md_name)
            unlink_shared_memory(self.shm_name)
        except ExistentialError:
            pass
        self.image_lock.release()
        self.image_lock.close()
def __init__(self, name=None):
    """Create the event; *name* becomes the POSIX semaphore name
    (None lets posix_ipc pick one)."""
    # Value handed from the writing thread to the reading thread.
    self._value = None
    # O_CREX: fail if a semaphore with this name already exists, so each
    # event owns a brand-new semaphore.  The count starts at posix_ipc's
    # default (presumably 0, so get() blocks until set() — verify).
    self._semaphore = Semaphore(name, flags=O_CREX)
# -*- coding: utf-8 -*-
from z3 import *
from ast import *
from utils import *
from posix_ipc import Semaphore, O_CREAT
import os
import sys
import atexit

# Maximum number of worker processes allowed to run at once.
N_CPUS = 4

## Use a named semaphore to limit the number of concurrent processes;
## we allow N_CPUS processes running simultaneously.  unlink() right away
## so the name is freed even after a crash; the open handle stays usable.
sem = Semaphore("/fork_sem", O_CREAT, 0o644, N_CPUS)
sem.unlink()
sem.acquire()


def on_exit():
    """Release our semaphore slot, reap child zombies, and log the exit."""
    sem.release()
    ## reap all zombies.
    try:
        while True:
            # -1 waits for any child.  The old call os.waitpid(0) was
            # missing the required options argument, so it raised
            # TypeError on the first iteration and never reaped anything.
            os.waitpid(-1, 0)
    except OSError:
        # ChildProcessError (an OSError subclass) means no children remain.
        pass
    log("exit")


atexit.register(on_exit)
def __init__(self, target_path, args=[]):
    """Register this client and (once per process) start the shared
    afl-forkserver, then attach to its shared memory and signal semaphores.

    NOTE(review): args=[] is a mutable default; harmless here because args
    is only read (appended into cmd), never mutated — but worth fixing.
    """
    # All forkserver state is process-global and shared by every client.
    global _lock
    global _process
    global _target_path
    global _shm
    global _mm
    global _ping_sem
    global _pong_sem
    global _clients
    global _next_client_id

    # First client in this process is responsible for the launch/attach.
    if _shm is None:
        launch_afl_forkserver = True
    else:
        launch_afl_forkserver = False

    with _lock:
        # Hand out a unique client id under the lock.
        self.client_id = _next_client_id
        _next_client_id += 1

        if launch_afl_forkserver:
            if _process is None:
                env = os.environ.copy()
                # An externally managed forkserver skips the spawn.
                if 'EXTERNAL_AFL_FORKSERVER' not in env:
                    print("Starting afl-forkserver...")
                    fd, afl_out_file = tempfile.mkstemp(
                        suffix='afl_out_file')
                    os.close(fd)
                    FNULL = open(os.devnull, 'w')
                    cmd = [
                        gym_fuzz1ng.afl_forkserver_path(),
                        '-f', afl_out_file,
                        '--', target_path,
                    ]
                    cmd += args
                    cmd += ['@@']
                    _process = subprocess.Popen(
                        cmd,
                        env=env,
                        stdout=FNULL,
                        stderr=subprocess.STDOUT,
                    )
                    _target_path = target_path
                    # NOTE(review): fixed sleep as a startup handshake is
                    # racy — presumably gives the forkserver time to create
                    # the shared memory attached below; verify.
                    time.sleep(1)
            # Attach to the forkserver's shared memory and semaphores.
            _shm = SharedMemory(SHARED_MEM_NAME)
            _mm = mmap.mmap(_shm.fd, 0)
            _ping_sem = Semaphore(SEM_PING_SIGNAL_NAME)
            _pong_sem = Semaphore(SEM_PONG_SIGNAL_NAME)
        else:
            # Only one target binary is supported per process.
            if target_path != _target_path:
                raise Exception(
                    "Concurrent targets is not supported: {} {}".
                    format(
                        target_path,
                        _target_path,
                    ),
                )
            else:
                print("Skipping afl-forkserver start.")

        _clients[self.client_id] = True
class PySpaceShMemConnection(object):
    ''' this class implements a pyspace shmem participant

    Segment layout (as written by _initialize/put):
      [0:7]   magic b'pyspace' (written last so waiters see a ready space)
      [8]     format version byte
      [0x10]  'I': offset of the tail (where the next tuple is appended)
      tuples start at PYSPACE_DATA_OFFSET; each is a 6-byte 'IBB' header
      (payload length, field count, flags) followed by the pickled payload;
      a header length of PYSPACE_END marks the tail.
    '''

    def __init__(self, name='PySpace'):
        ''' constructor - connect to the shmem '''
        self._name = name
        self._mmap = None
        self._lock = None

    def put(self, tpl):
        ''' put the given tuple into the tuple space. '''
        LOG.info('pyspace %s: attempting to put tuple %s', self._name, tpl)
        data = pickle.dumps(tpl)
        LOG.info(' data: %s', data)
        self._lock.acquire()
        (offset,) = struct.unpack_from('I', self._mmap, 0x10)
        LOG.info(' space claims offset of %#010x', offset)
        (length,) = struct.unpack_from('I', self._mmap, offset)
        if length != PYSPACE_END:
            self._lock.release()
            raise ValueError('pyspace offset data corrupt')
        LOG.info(' packing tuple metadata to offset')
        struct.pack_into('IBB', self._mmap, offset, len(data), len(tpl), 0)
        LOG.info(' writing tuple payload')
        # Payload always sits 6 bytes past the header ('IBB' = 4 + 1 + 1).
        self._mmap[offset+6:offset+6+len(data)] = data
        LOG.info(' packing PYSPACE_END')
        struct.pack_into('I', self._mmap, offset + len(data) + 6, PYSPACE_END)
        LOG.info(' updating offset data')
        struct.pack_into('I', self._mmap, 0x10, offset + len(data) + 6)
        self._lock.release()

    def take(self, tpl):
        ''' take the queried tuple from the tuple space and return it.

        None fields in tpl act as wildcards. The matching tuple is
        invalidated (consumed) under the lock before being returned.
        '''
        LOG.info('pyspace %s: attempting to take tuple %s', self._name, tpl)
        start = PYSPACE_DATA_OFFSET
        while True:
            LOG.info('pyspace %s: looking at tuple at offset %#010x', self._name, start)
            (length, fields, flags) = struct.unpack_from('IBB', self._mmap, start)
            LOG.info(' length: %#010x, fields: %d, flags: %s', length, fields, '{0:b}'.format(flags))
            if length == PYSPACE_END:
                LOG.info(' tail reached. no match.')
                return None
            if fields != len(tpl):
                LOG.info(' length mismatch.')
                start += 6 + length
                continue
            if flags & PYSPACE_FLAG_INVALID:
                LOG.info(' tuple invalidated.')
                start += 6 + length
                continue
            data = self._mmap[start+6:start+6+length]
            LOG.info(' fetched tuple data: %s', data)
            data = pickle.loads(data)
            LOG.info(' unpacked tuple data: %s', data)
            if all(x == y or y is None for (x, y) in zip(data, tpl)):
                LOG.info(' matching tuple found :^D')
                self._lock.acquire()
                # verify our view is still up to date
                (new_flags,) = struct.unpack_from('B', self._mmap, start + 5)
                if new_flags & PYSPACE_FLAG_INVALID:
                    LOG.info(' tuple has already been invalidated.. moving on')
                    self._lock.release()
                    start += 6 + length
                    continue
                # this is our token now!
                new_flags |= PYSPACE_FLAG_INVALID
                struct.pack_into('B', self._mmap, start + 5, new_flags)
                self._lock.release()
                return data
            start += 6 + length

    def peek(self, tpl):
        ''' seek the given tuple in the tuple space and return it.

        Like take(), but the tuple is left in the space (not invalidated).
        '''
        LOG.info('pyspace %s: attempting to peek tuple %s', self._name, tpl)
        start = PYSPACE_DATA_OFFSET
        while True:
            LOG.info('pyspace %s: looking at tuple at offset %#010x', self._name, start)
            (length, fields, flags) = struct.unpack_from('IBB', self._mmap, start)
            LOG.info(' length: %#010x, fields: %d, flags: %s', length, fields, '{0:b}'.format(flags))
            if length == PYSPACE_END:
                LOG.info(' tail reached. no match.')
                return None
            if fields != len(tpl):
                LOG.info(' length mismatch.')
                start += 6 + length
                continue
            if flags & PYSPACE_FLAG_INVALID:
                LOG.info(' tuple invalidated.')
                start += 6 + length
                continue
            # BUG FIX: the payload begins at start+6, after the 6-byte 'IBB'
            # header (put() writes it there and take() reads it there).  The
            # old start+5 slice handed pickle one byte of header as payload.
            data = pickle.loads(self._mmap[start+6:start+6+length])
            LOG.info(' unpacked tuple data: %s', data)
            if all(x == y or y is None for (x, y) in zip(data, tpl)):
                LOG.info(' matching tuple found :^D')
                return data
            start += 6 + length

    def optimize(self):
        ''' remove all invalidated tuples by compacting live ones downward '''
        LOG.info('pyspace %s: attempting to optimize', self._name)
        start = PYSPACE_DATA_OFFSET
        last = PYSPACE_DATA_OFFSET
        while True:
            LOG.info('pyspace %s: looking at tuple at offset %#010x', self._name, start)
            (length, fields, flags) = struct.unpack_from('IBB', self._mmap, start)
            LOG.info(' length: %#010x, fields: %d, flags: %s', length, fields, '{0:b}'.format(flags))
            if length == PYSPACE_END:
                LOG.info(' tail reached. all done.')
                struct.pack_into('I', self._mmap, last, PYSPACE_END)
                struct.pack_into('I', self._mmap, 0x10, last)
                return
            if flags & PYSPACE_FLAG_INVALID:
                LOG.info(' tuple invalidated. wiping.')
                start += 6 + length
                continue
            if last != start:
                # BUG FIX: log the destination offset; the old code passed
                # 'length' as the second argument instead of 'last'.
                LOG.info(' moving tuple from %d to %d', start, last)
                self._mmap[last:last+6+length] = self._mmap[start:start+6+length]
            start += 6 + length
            last += 6 + length

    def open(self):
        ''' connect to the shmem of the given name. this initializes the
        shmem, if it does not exist, on exactly one client '''
        try:
            self._connect()
        except ExistentialError:
            LOG.warning('shmem %s: connect failed, need to create', self._name)
            try:
                self._create()
            except ExistentialError:
                # Another client won the O_CREX race; fall back to connecting.
                LOG.warning('shmem %s: create failed, someone was faster', self._name)
                self._connect()
        # wait for space to be initialized (creator writes the magic last)
        tries = 0
        while self._mmap[:7] != b'pyspace':
            time.sleep(1)
            tries += 1
            if tries >= 10:
                raise ValueError('PySpace wait timed out - corrupted?')
        if self._mmap[8] != PYSPACE_VERSION:
            raise ValueError('PySpace version mismatch')

    def close(self):
        ''' close the connection to the shmem '''
        self._mmap.close()
        self._lock.close()

    def _connect(self):
        ''' attempt to connect to the shared memory '''
        LOG.info('shmem %s: attempting connect', self._name)
        shmem = SharedMemory(self._name)
        self._mmap = mmap.mmap(shmem.fd, shmem.size)
        shmem.close_fd()
        self._lock = Semaphore('/pyspace_%s_lock' % self._name)
        LOG.info('shmem %s: connect succeeded', self._name)

    def _create(self):
        ''' attempt to create and initialize the shared memory '''
        LOG.info('shmem %s: attempting create shmem', self._name)
        # O_CREX: exactly one client wins creation; losers raise and connect.
        shmem = SharedMemory(self._name, size=SHMEM_SIZE, flags=O_CREX)
        LOG.info('shmem %s: attempting create mmap', self._name)
        self._mmap = mmap.mmap(shmem.fd, shmem.size)
        shmem.close_fd()
        LOG.info('shmem %s: attempting create semaphore', self._name)
        self._lock = Semaphore('/pyspace_%s_lock' % self._name, flags=O_CREX)
        LOG.info('shmem %s: create succeeded', self._name)
        try:
            self._initialize()
            self._lock.release()
        # Broad catch is deliberate: clean up the half-built segment and
        # semaphore on any failure, then re-raise unconditionally.
        except:
            LOG.exception('shmem %s: initialize failed; attempting unlink', self._name)
            shmem.unlink()
            self._lock.unlink()
            raise

    def _initialize(self):
        ''' prepare the shmem control structures '''
        LOG.info('shmem %s: attempting initialize', self._name)
        LOG.info(' writing version number')
        self._mmap[8] = PYSPACE_VERSION
        LOG.info(' writing initial tail offset')
        struct.pack_into('I', self._mmap, 0x10, PYSPACE_DATA_OFFSET)
        LOG.info(' writing PYSPACE_END symbol')
        struct.pack_into('I', self._mmap, PYSPACE_DATA_OFFSET, PYSPACE_END)
        LOG.info(' writing magic number')
        # Magic written last: open() spins on it, so it doubles as a ready flag.
        self._mmap[:7] = b'pyspace'
        LOG.info('shmem %s: initialize succeeded', self._name)
class SharedValue:
    """A single float shared between processes via POSIX shared memory.

    The 4-byte value lives in a segment named *name*; a named semaphore of
    the same name serializes get()/add()/close().
    """

    def __init__(self, name):
        # Initialize variables for memory regions, buffers and the Semaphore.
        self.shm_buf = None
        self.shm_region = None
        self.value_lock = None
        self.shm_name = name
        self.value_lock_name = name
        # Attach to the existing region, or create it sized for one c_float.
        try:
            self.shm_region = SharedMemory(self.shm_name)
            self.shm_buf = mmap.mmap(self.shm_region.fd, sizeof(c_float))
            self.shm_region.close_fd()
        except ExistentialError:
            self.shm_region = SharedMemory(self.shm_name, O_CREAT, size=sizeof(c_float))
            self.shm_buf = mmap.mmap(self.shm_region.fd, self.shm_region.size)
            self.shm_region.close_fd()
        # Create the semaphore exclusively; unlink/recreate a stale one so
        # the count starts from a known state.
        # NOTE(review): resets the lock even if another process holds it.
        try:
            self.value_lock = Semaphore(self.value_lock_name, O_CREX)
        except ExistentialError:
            value_lock = Semaphore(self.value_lock_name, O_CREAT)
            value_lock.unlink()
            self.value_lock = Semaphore(self.value_lock_name, O_CREX)
        # Release once so the first acquire() does not block (count 0 -> 1).
        self.value_lock.release()

    # Get the shared value
    def get(self):
        """Return the current float value, read under the lock."""
        self.value_lock.acquire()
        value = struct.unpack('f', self.shm_buf)[0]
        self.value_lock.release()
        return value

    # Add the shared value
    def add(self, value):
        """Write *value* (a float) into the shared region under the lock."""
        self.value_lock.acquire()
        self.shm_buf[:] = struct.pack('f', value)
        self.value_lock.release()

    # Destructor function to unlink and disconnect
    def close(self):
        """Close the mapping and unlink the shared segment.

        NOTE(review): the semaphore is closed but never unlinked, so its
        name persists in the system — verify this is intentional.
        """
        self.value_lock.acquire()
        self.shm_buf.close()
        try:
            unlink_shared_memory(self.shm_name)
        except ExistentialError:
            pass
        self.value_lock.release()
        self.value_lock.close()