def barebox_overlay_mbr(fd_barebox, fd_hd):
    import mmap, os
    sb = os.fstat(fd_barebox.fileno())
    barebox_image = mmap.mmap(fd_barebox.fileno(), 0, access=mmap.ACCESS_READ)
    check_for_valid_mbr(barebox_image, sb.st_size)
    required_size = sb.st_size
    hd_image = mmap.mmap(fd_hd.fileno(), required_size, access=mmap.ACCESS_WRITE)
    check_for_space(hd_image, required_size)

    # embed barebox's boot code into the disk drive image
    hd_image[0:OFFSET_OF_PARTITION_TABLE] = barebox_image[0:OFFSET_OF_PARTITION_TABLE]

    # embed the barebox main image into the disk drive image,
    # but keep the persistent environment storage untouched
    # (if defined), i.e. store the main image behind this special area.
    hd_image_start = SECTOR_SIZE
    barebox_image_start = SECTOR_SIZE
    size = sb.st_size - SECTOR_SIZE
    hd_image[hd_image_start:hd_image_start + size] = \
        barebox_image[barebox_image_start:barebox_image_start + size]

    embed = PATCH_AREA
    indirect = SECTOR_SIZE
    fill_daps(DAPS(hd_image, embed), 1, INDIRECT_AREA, INDIRECT_SEGMENT, 1)

    rc = barebox_linear_image(hd_image, indirect, sb.st_size)
    if not rc:
        return False

    hd_image.close()
    barebox_image.close()
    return True
def __init__(self, size):
    if sys.version_info >= (2, 5, 0):
        self.buffer = mmap.mmap(-1, size)
        self.size = size
        self.name = None
    else:
        fd, self.name = tempfile.mkstemp(prefix='pym-')
        self.size = remaining = size
        while remaining > 0:
            remaining -= os.write(fd, '\0' * remaining)
        self.buffer = mmap.mmap(fd, size)
        os.close(fd)
        if sys.platform == 'cygwin':
            # cannot unlink file until it is no longer in use
            def _finalize_heap(mmap, unlink, name):
                mmap.close()
                unlink(name)
            Finalize(
                self, _finalize_heap,
                args=(self.buffer, os.unlink, self.name),
                exitpriority=-10
            )
        else:
            os.unlink(self.name)
def mergeTermFiles():
    files = os.listdir(constants.termsDir)
    linesinFile = len(files)
    linesToAdd = getLinesToAdd(linesinFile)
    files = addDummyTerms(files, linesToAdd)
    files.sort()
    f = open(constants.termsFile, "r+b")
    map = mmap.mmap(f.fileno(), 0)
    fr = open(constants.termsListFile, "w")
    f2 = open(constants.fSortedTermIndex, "wb")
    byteLen = 0
    for filex in files:
        if filex[0:1] != constants.underscore:
            fr.write(filex + constants.space)
            fx = open(constants.termsDir + "/" + filex, "r+b")
            map1 = mmap.mmap(fx.fileno(), 0)
            map1.seek(0)
            map.resize(map.size() + map1.size())
            map.write(map1[0:])
            Str = makeTermStr(filex, byteLen, byteLen + map1.size())
            byteLen = byteLen + map1.size()
        else:
            Str = makeTermStr(filex, 0, 0)
        f2.write(Str.encode(constants.encoding))
    fr.close()
    f2.close()
    map.close()
    f.close()
def _mmap(self): ''' protected api ''' # mmap.mmap has a full bytebuffer API, so we can use it as is for bytebuffer. # we have to get a ctypes pointer-able instance to make our ctypes structure read efficient. # sad we can't have a bytebuffer from that same raw memspace # we do not keep the bytebuffer in memory, because it's a lost of space in most cases. if self._base is None: mmap_hack = True if mmap_hack: # XXX that is the most f****d up, non-portable f**k I ever wrote. self._local_mmap_bytebuffer = mmap.mmap(self._memdump.fileno(), self.end-self.start, access=mmap.ACCESS_READ) # yeap, that right, I'm stealing the pointer value. DEAL WITH IT. heapmap = struct.unpack('L', (ctypes.c_uint).from_address(id(self._local_mmap_bytebuffer) + 8 ) )[0] self._local_mmap_content = (ctypes.c_ubyte*(self.end-self.start)).from_address(heapmap) elif hasattr(self._memdump,'fileno'): # normal file. mmap kinda useless i suppose. log.warning('Memory Mapping content mmap-ed() (double copy of %s) : %s'%(self._memdump.__class__, self)) # we have the bytes local_mmap_bytebuffer = mmap.mmap(self._memdump.fileno(), self.end-self.start, access=mmap.ACCESS_READ) # we need an ctypes self._local_mmap_content = utils.bytes2array(local_mmap_bytebuffer, ctypes.c_ubyte) else: # dumpfile, file inside targz ... any read() API really self._local_mmap_content = utils.bytes2array(self._memdump.read(), ctypes.c_ubyte) log.warning('Memory Mapping content copied to ctypes array : %s'%(self)) # make that _base self._base = LocalMemoryMapping.fromAddress( self, ctypes.addressof(self._local_mmap_content) ) log.debug('LocalMemoryMapping done.') #redirect stuff self.readWord = self._base.readWord self.readArray = self._base.readArray self.readBytes = self._base.readBytes self.readStruct = self._base.readStruct return self._base
def test_big_mappings(self):
    with SpicommDev() as dev:
        with mmap.mmap(dev, length=7 * 1024 * 1024 + 1, offset=0 * mmap.PAGESIZE) as mm1, \
             mmap.mmap(dev, length=12 * 1024 * 1024 + 2, offset=num_pages(len(mm1)) * mmap.PAGESIZE) as mm2:
            self.assertEqual(len(mm1), 7 * 1024 * 1024 + 1)
            self.assertEqual(len(mm2), 12 * 1024 * 1024 + 2)
def __init__(self, evtout):
    path = "/dev/uio%d" % evtout
    self.uio = os.open(path, os.O_RDWR | os.O_SYNC, 0)
    self.pruss_phys_base = readhex(pruss_base)
    self.pruss_map_size = readhex(pruss_size)
    self.dataram_base = mmap.mmap(self.uio, self.pruss_map_size, mmap.MAP_SHARED,
                                  mmap.PROT_READ | mmap.PROT_WRITE)
    # hokey way to get at the address of the mmap region
    i = ctypes.c_uint8.from_buffer(self.dataram_base)
    ba = ctypes.addressof(i)
    self.drw = Mem(4, ba, self.pruss_map_size)
    self.drs = Mem(2, ba, self.pruss_map_size)
    self.drb = Mem(1, ba, self.pruss_map_size)
    self.version = self.detect_hw_version()
    if self.version < 0:
        raise Exception("cannot detect hardware version")
    self.extram_phys_base = readhex(extram_base)
    self.extram_map_size = readhex(extram_size)
    self.extram = mmap.mmap(self.uio, self.extram_map_size, mmap.MAP_SHARED,
                            mmap.PROT_READ | mmap.PROT_WRITE)
    e = ctypes.c_uint8.from_buffer(self.extram)
    ea = ctypes.addressof(e)
    self.erw = Mem(4, ea, self.extram_map_size)
    self.ers = Mem(2, ea, self.extram_map_size)
    self.erb = Mem(1, ea, self.extram_map_size)
def fix_savedata(dir):
    if not os.path.isdir(dir) or not os.path.isfile(dir + "/SYS.BIN"):
        ErrorMessageBox("Directory error")
    import mmap
    fd = os.open(dir + "/SYS.BIN", os.O_RDWR)
    buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
    if buf[0:8] != "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF":
        print "Bad savedata or not decrypted. SYS.BIN"
        ErrorMessageBox("Savedata error")
    # walk the 100 save slots of 0x1258 bytes each
    for pos in range(0x269480, 0x269480 + 0x1258 * 100, 0x1258):
        if buf[pos:pos + 4] == "\0\0\0\2":
            buf[pos + 0x18:pos + 0x58] = "\0\0\0\0" * 0x10
    os.close(fd)
    print 'Fix SYS.BIN.'

    import fnmatch
    zstr = "\0\0\0\0" * ((0x8A358 - 0x46358) / 4)
    for directory, subdirectories, files in os.walk(dir):
        for file in files:
            if fnmatch.fnmatch(file, 'SAVE???.BIN'):
                fd = os.open(os.path.join(directory, file), os.O_RDWR)
                buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
                if buf[0:4] != "\0\0\0\2":
                    print "Bad savedata or not decrypted. %s" % file
                    ErrorMessageBox("Savedata error or not decrypted")
                buf[0x18:0x58] = "\0\0\0\0" * 0x10
                buf[0x46358:0x8A358] = zstr
                os.close(fd)
                print 'Fix %s.' % (file)
    windll.user32.MessageBoxA(None, "Savedata fix complete!", EXE_TITLE, 0)
def _sequence2mmap(cls, sequence) -> (mmap.mmap, int): # Final """ :return An anonymous mmap storing the bytestring representation of the sequence @sequence, paired with the number of elements in the sequence. @sequence needs to either be a bytestring or an iterable containing only elements that implement __len__. """ def double_mmap_capacity(m): new_m = mmap.mmap(-1, capacity) new_m.write(bytes(m)) # FIXME Potentially large bytestring m.close() return new_m protection = cls._access() if isinstance(sequence, bytes): m = mmap.mmap(-1, len(sequence), access=protection) m.write(sequence) return m, len(m) capacity = mmap.PAGESIZE # Initial capacity. Cannot do len(sequence) since it is a generator. m = mmap.mmap(-1, capacity) currentsize = 0 element_count = 0 for element in sequence: element_count += 1 bs = cls._encode(element) currentsize += len(bs) while currentsize > capacity: capacity *= 2 m = double_mmap_capacity(m) # Because m.resize() is apparently bugged and causes SIGBUS m.write(bs) m.resize(currentsize) return m, element_count
def _open_mmap(_file):
    # the mmap parameters differ between Windows and POSIX platforms
    if CURRENT_PLATFORM == "Windows":
        mmap_file = mmap.mmap(_file.fileno(), 0, access=mmap.ACCESS_READ)
    else:
        mmap_file = mmap.mmap(_file.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
    return mmap_file
def find_bundle_id(appath): for path, dirs, _ in os.walk(appath): for ddir in dirs: if ddir.endswith(".xcodeproj") and ddir != "Pods.xcodeproj": pbxpath = path + "/" + ddir + "/project.pbxproj" pbxfile = open(pbxpath) pbxmem = mmap.mmap(pbxfile.fileno(), 0, access=mmap.ACCESS_READ) searchstr = b"PRODUCT_BUNDLE_IDENTIFIER = " bundle_start = pbxmem.find(searchstr) if bundle_start > 0: bundle_off = pbxmem[bundle_start:].find(b";") bundle_id = pbxmem[bundle_start + len(searchstr): bundle_start+bundle_off].decode("UTF-8") group_id = bundle_id[:bundle_id.rfind(".")] group_id = group_id.replace("\"", "") return group_id # check the info plist for plist in deep_file_find(appath, "Info.plist"): with open(plist, 'r') as plistfile: plistmem = mmap.mmap(plistfile.fileno(), 0, access=mmap.ACCESS_READ) searchstr = b"<key>CFBundleIdentifier</key>" identifierkey = plistmem.find(searchstr) if identifierkey > 0: identifier = plistmem[len(searchstr)+identifierkey:] id_start = identifier.find(b"<string>") + len(b"<string>") id_end = identifier.find(b"</string>") full_id = identifier[id_start:id_end] bundle_id = full_id[:full_id.rfind(b".")] return bundle_id.decode("ascii")
def test_get_number(self): test_data = { '123': 123, '43445': 43445, '+17': 17, '-98': -98, '34.5': 34.5, '-3.62': -3.62, '+123.6': 123.6, '4.': 4.0, '-0.002': -0.002, '0.0': 0, } for key in test_data: with closing(NamedTemporaryFile()) as f: leading = map(lambda i: random.randint(0, 255), xrange(random.randint(0, 255))) f.write(bytearray(leading)) f.write(key) f.write(random.choice((' ', '\r', '\n'))) f.flush() with closing(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)) as stream: p = PDFLexer(stream) numeric_object = p.get_number(len(leading)) assert numeric_object.data == test_data[key] assert numeric_object.start_pos == len(leading) assert numeric_object.end_pos == len(leading) + len(key) with closing(NamedTemporaryFile()) as f: f.write('abc5\r') f.flush() with closing(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)) as stream: p = PDFLexer(stream) with pytest.raises(PDFLexerError): numeric_object = p.get_number(0)
def test_set_item(self): import mmap f = open(self.tmpname + "s", "w+") f.write("foobar") f.flush() m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_READ) def fn(): m[1] = 'a' raises(TypeError, fn) m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_WRITE) def fn(): m["foo"] = 'a' raises(TypeError, fn) def fn(): m[-7] = 'a' raises(IndexError, fn) def fn(): m[0] = 'ab' raises((IndexError, ValueError), fn) # IndexError is in CPython, # but doesn't make much sense # def f(m): m[1:3] = u'xx' # py.test.raises(IndexError, f, m) # def f(m): m[1:4] = "zz" # py.test.raises(IndexError, f, m) # def f(m): m[1:6] = "z" * 6 # py.test.raises(IndexError, f, m) # def f(m): m[:2] = "z" * 5 # m[1:3] = 'xx' # assert m.read(6) == "fxxbar" # m.seek(0) m[0] = 'x' assert m[0] == 'x' m[-6] = 'y' data = m.read(6) assert data == "yoobar" # yxxbar with slice's stuff m.close() f.close()
def test_get_stream(self): # empty stream with closing(NamedTemporaryFile()) as f: eol1 = random.choice(('\r\n', '\n')) eol2 = random.choice(('\r\n', '\r', '\n')) f.write('<<>>\nstream' + eol1 + eol2 + 'endstream') f.flush() with closing(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)) as stream: p = PDFLexer(stream) s = p.get_stream(0) assert s.data == '' assert s.start_pos == 0 assert s.end_pos == 5 + 6 + len(eol1) + len(eol2) + 8 + 1 assert s.stream_dict.data == {} with closing(NamedTemporaryFile()) as f: eol1 = random.choice(('\r\n', '\n')) eol2 = random.choice(('\r\n', '\r', '\n')) data = self._rand_string(random.randint(0, 65536)) f.write('<<>>\nstream' + eol1 + data + eol2 + 'endstream') f.flush() with closing(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)) as stream: p = PDFLexer(stream) s = p.get_stream(0) assert s.data == data assert s.start_pos == 0 assert s.end_pos == 5 + 6 + len(eol1) + len(data) + len(eol2) + 8 + 1 assert s.stream_dict.data == {}
def test_mmap(self): import mmap buffer_support = self.get_buffer_support() s = 'a\0x' mm = mmap.mmap(-1, 3) mm[:] = s assert buffer_support.check_readbuffer(mm) assert s == buffer_support.readbuffer_as_string(mm) assert s == buffer_support.writebuffer_as_string(mm) assert s == buffer_support.charbuffer_as_string(mm) s = '\0' * 3 buffer_support.zero_out_writebuffer(mm) assert s == ''.join(mm) assert s == buffer_support.readbuffer_as_string(mm) assert s == buffer_support.writebuffer_as_string(mm) assert s == buffer_support.charbuffer_as_string(mm) s = '\0' * 3 ro_mm = mmap.mmap(-1, 3, access=mmap.ACCESS_READ) assert buffer_support.check_readbuffer(ro_mm) assert s == buffer_support.readbuffer_as_string(ro_mm) assert raises(TypeError, buffer_support.writebuffer_as_string, ro_mm) assert s == buffer_support.charbuffer_as_string(ro_mm)
def __init__(self):
    self._acpmf_physics = mmap.mmap(0, ctypes.sizeof(SPageFilePhysics), "acpmf_physics")
    self._acpmf_graphics = mmap.mmap(0, ctypes.sizeof(SPageFileGraphic), "acpmf_graphics")
    self._acpmf_static = mmap.mmap(0, ctypes.sizeof(SPageFileStatic), "acpmf_static")
    self.physics = SPageFilePhysics.from_buffer(self._acpmf_physics)
    self.graphics = SPageFileGraphic.from_buffer(self._acpmf_graphics)
    self.static = SPageFileStatic.from_buffer(self._acpmf_static)
def rollover(self):
    """This method normalizes the file object by making ``path``, ``name``
    and ``handle`` properties consistent. It writes incoming data to the
    file object and points the ``data`` iterable to the contents of this file.
    """
    if self.data is not None:
        if self.path is None:
            self.handle = tempfile.NamedTemporaryFile()
            self.name = self.path = self.handle.name
        else:
            self.handle = open(self.path, 'wb')
        # data is a ByteArray, so a sequence of str/bytes objects
        for d in self.data:
            self.handle.write(d)
    elif self.handle is not None:
        self.data = [mmap(self.handle.fileno(), 0)]  # 0 = whole file
    elif self.path is not None:
        if not isfile(self.path):
            logger.error("File path in %r not found", self)
        self.handle = open(self.path, 'rb')
        self.data = [mmap(self.handle.fileno(), 0, access=ACCESS_READ)]
        self.abspath = abspath(self.path)
        self.name = self.path = basename(self.path)
    else:
        raise ValueError("Invalid file object passed in. All of "
                         ".data, .handle and .path are None.")
def create_jpeg_from_itc(artwork_file):
    """Parses out JPEG from .itc files"""
    global artwork_item_count
    global artwork_name_prefix
    try:
        artwork_item_count += 1
        itc_file_handle = open(artwork_file, "r+")
        byte_data = mmap.mmap(itc_file_handle.fileno(), 0)
        file_size = len(byte_data)
        new_size = file_size - JPEG_SIGNATURE_OFFSET
        # Extract out ITC metadata info that we don't need for now
        byte_data.move(0, JPEG_SIGNATURE_OFFSET, file_size - JPEG_SIGNATURE_OFFSET)
        byte_data.flush()
        byte_data.close()
        itc_file_handle.truncate(new_size)
        byte_data = mmap.mmap(itc_file_handle.fileno(), 0)
        jpeg_file = artwork_file.replace('.itc', '.jpeg')
        artwork_path_components = jpeg_file.split("/")
        artwork_path_components[-1] = artwork_name_prefix + str(artwork_item_count) + ".jpeg"
        jpeg_file = "/".join(artwork_path_components)
        os.rename(artwork_file, jpeg_file)
    except:
        sys.stderr.write("Error: could not convert %s to JPEG." % str(artwork_file))
        sys.exit(-1)
def test_offset (self): f = open (TESTFN, 'w+b') try: # unlink TESTFN no matter what halfsize = mmap.ALLOCATIONGRANULARITY m = self.make_mmap_file (f, halfsize) m.close () f.close () mapsize = halfsize * 2 # Try invalid offset f = open(TESTFN, "r+b") for offset in [-2, -1, None]: try: m = mmap.mmap(f.fileno(), mapsize, offset=offset) self.assertEqual(0, 1) except (ValueError, TypeError, OverflowError): pass else: self.assertEqual(0, 0) f.close() # Try valid offset, hopefully 8192 works on all OSes f = open(TESTFN, "r+b") m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize) self.assertEqual(m[0:3], b'foo') f.close() m.close() finally: f.close() try: os.unlink(TESTFN) except OSError: pass
def loadDocs(path):
    # do this using memory-mapped I/O. faster? think so.
    print "Loading docs ..."
    # get number of lines
    numLines = 0
    with open(path, "r+b") as f:
        m = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
        while m.readline() != '':
            numLines += 1
    print str(numLines) + " docs to load."
    docs = numLines * [None]
    # read the docs in
    with open(path, "r+b") as f:
        m = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
        i = 0
        while True:
            line = m.readline()
            if line == '':
                break
            #print line
            line = line.rstrip().lstrip()
            line = line[line.find(' ') + 1:]
            split = line.split(" ")
            doc = (n.array([int(p.split(":")[0]) for p in split]),
                   n.array([int(p.split(":")[1]) for p in split]))
            #print doc
            #print
            docs[i] = doc
            i += 1
    print "done."
    return docs
def initshm(self, count = 0): if count >= 3: # Well, we tried. Likely this died without cleaning up. sem = posix_ipc.Semaphore(self.name) sem.unlink() sem.close() del sem count = 0 try: sem = posix_ipc.Semaphore(self.name, flags=os.O_CREAT|os.O_EXCL) except ExistentialError: try: shm = posix_ipc.SharedMemory(self.name) except ExistentialError: # So, the semaphore exists but the shared memory does not. Clearly things # are rotten in the state of Denmark. # We'll try again a few more times; perhaps we unluckily another one of # us in the middle of creating the shm. time.sleep(.25) return self.initshm(count + 1) sem = posix_ipc.Semaphore(self.name) sem.acquire(5) mem = mmap.mmap(shm.fd, 0) return (shm, mem, sem) shm = posix_ipc.SharedMemory(self.name, flags=os.O_CREAT|os.O_EXCL, size=self.memlen) mem = mmap.mmap(shm.fd, 0) initial = pickle.dumps({"this": (0, 1)}) if len(initial) > self.memlen: raise ValueError("Your memlen is too small") mem.write(initial) mem.seek(0) return (shm, mem, sem)
def check_trajectory_file_type(file_name, bytes_to_check=1000000):
    # Check that the file exists
    if not os.path.isfile(file_name):
        print file_name + ' file does not exist'
        exit()

    # Check if LAMMPS file
    with open(file_name, "r+") as f:
        file_map = mmap.mmap(f.fileno(), bytes_to_check)
        num_test = [file_map.find('ITEM: TIMESTEP'),
                    file_map.find('ITEM: NUMBER OF ATOMS'),
                    file_map.find('ITEM: BOX BOUNDS')]
        file_map.close()
    if -1 not in num_test:
        return read_lammps_trajectory

    # Check if VASP file
    with open(file_name, "r+") as f:
        file_map = mmap.mmap(f.fileno(), bytes_to_check)
        num_test = [file_map.find('NIONS'),
                    file_map.find('POMASS'),
                    file_map.find('direct lattice vectors')]
        file_map.close()
    if -1 not in num_test:
        return read_vasp_trajectory

    print('Trajectory file not recognized')
    exit()
    return None
def insertIntoMmap(f, mmap_in, offset, data):
    data_length = len(data)
    mmap_in_size = len(mmap_in)
    new_map_size = mmap_in_size + data_length
    destination_offset = offset + data_length
    block_size = mmap_in_size - offset
    end_data_block = offset + data_length

    # flush the mmap due to a python bug, and recreate it at the correct size
    mmap_in.flush()
    mmap_in.close()

    # write dummy data to the end of the file (just to extend the size)
    f.seek(0, os.SEEK_END)
    f.write(data)
    f.seek(offset)

    # create a new map, and shift the block
    mmap_in = mmap.mmap(f.fileno(), 0)
    new_mmap_in_size = len(mmap_in)
    mmap_in.move(destination_offset, offset, block_size)

    # insert the new data
    mmap_in[offset:end_data_block] = data
    return mmap.mmap(f.fileno(), 0)
def _load(self, do_crop=False):
    """
    Convert word file to html and store it in tempfile
    params:
        - do_crop: Whether to crop file to content or leave whole html
    """
    # Get temporary file where wvHtml will store output
    out_file = tempfile.mkstemp()[1]
    # Call wvHtml
    subprocess.check_call(['wvHtml', self.file, out_file])
    if do_crop:
        # Create mmap object for file
        self.html = open(out_file, 'r+b')
        self.html_map = mmap.mmap(self.html.fileno(), 0)
        # Get index of real data section start and end
        # 21 is the length of the header
        start = self.html_map.find('<!--Section Begins-->') + 21
        end = self.html_map.rfind('<!--Section Ends-->')
        # Resize map to new size
        self.html_map.move(0, start, end - start)
        self.html_map.resize(end - start)
    else:
        # Just load output
        self.html = open(out_file, 'r+b')
        self.html_map = mmap.mmap(self.html.fileno(), 0)
    # Fix paths to images
    self._fix_images()
def iter_keys(self, filename): # yields keyname, offset, size f = compat.file_open(filename, 'rb') header = f.read(8) self._verify_header(header) contents = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) remap_size = mmap.ALLOCATIONGRANULARITY * _MAPPED_LOAD_PAGES # We need to track the max_index to use as the upper bound # in the .find() calls to be compatible with python 2.6. # There's a bug in python 2.6 where if an offset is specified # along with a size of 0, then the size for mmap() is the size # of the file instead of the size of the file - offset. To # fix this, we track this ourself and make sure we never go passed # max_index. If we don't do this, python2.6 will crash with # a bus error (python2.7 works fine without this workaround). # See http://bugs.python.org/issue10916 for more info. max_index = os.path.getsize(filename) file_size_bytes = max_index num_resizes = 0 current = 8 try: while current != max_index: try: key_size, val_size = struct.unpack( '!ii', contents[current:current+8]) except struct.error: raise DBMLoadError() key = contents[current+8:current+8+key_size] if len(key) != key_size: raise DBMLoadError() offset = (remap_size * num_resizes) + current + 8 + key_size if offset + val_size > file_size_bytes: # If this happens then the index is telling us # to read past the end of the file. What we need # to do is stop reading from the index. return yield (key, offset, val_size) if val_size == _DELETED: val_size = 0 # Also need to skip past the 4 byte checksum, hence # the '+ 4' at the end current = current + 8 + key_size + val_size + 4 if current >= remap_size: contents.close() num_resizes += 1 offset = num_resizes * remap_size # Windows python2.6 bug. You can't specify a length of # 0 with an offset, otherwise you get a WindowsError, not # enough storage is available to process this command. # Couldn't find an issue for this, but the workaround # is to specify the actual length of the mmap'd region # which is the total size minus the offset we want. contents = mmap.mmap(f.fileno(), file_size_bytes - offset, access=mmap.ACCESS_READ, offset=offset) current -= remap_size max_index -= remap_size finally: contents.close() f.close()
def parseCovLog(covdef, covlog): fmatch = open(FNAME_MATCH, "w") fmismatch = open(FNAME_MISMATCH, "w") with open(os.path.join(covdef, FNAME_ARM), 'r+') as f: map_arm = mmap.mmap(f.fileno(), 0) with open(os.path.join(covdef, FNAME_THUMB), 'r+') as f: map_thumb = mmap.mmap(f.fileno(), 0) cl_lines = open(covlog, 'r').readlines() for cl in cl_lines: print "processing line: %s..." %cl cl_subs = cl.split(':') cl_variant = cl_subs[0] cl_count = cl_subs[1] found = 0 for fcd in map_arm, map_thumb: #fname in FNAME_ARM, FNAME_THUMB: # fcd = open(os.path.join(covdef, fname), 'r+') fcd.seek(0) last_pos = fcd.tell() while True: cd = fcd.readline() if not cd: break cd_str = cd.strip() if cd_str[0].isdigit(): cd_subs = cd.split(':') sn = cd_subs[0] variant = cd_subs[1] count = cd_subs[2] # if cl is matched with cd.subs[1] # then increase the count if variant.find(cl_variant) == 0: updated_count = int(count)+int(cl_count) fmatch.write("[MATCH] %s (current:%s, old:%s, new:%d)\n" %(cl, cl_count, count, updated_count)) print "[MATCH]!" found = 1 fcd.seek(last_pos) cd_subs[2] = str(updated_count) newstr = ':'.join(cd_subs) fcd.write(newstr) # fcd.flush() break last_pos = fcd.tell() if found == 1: break # if not found the matched line in the file # report the failure and exit if found == 0: fmismatch.write("%s\n" %cl) print "[MISMATCH]!!!" map_arm.close() map_thumb.close() fmatch.close() fmismatch.close() return 0
def __init__(self, config, threshold=5, false_start=5, blue_shmem_name="/blue.shmem", red_shmem_name="/red.shmem", clear_sem_name="/clear.sem", mem=4096): import posix_ipc as ipc self.ipc = ipc self.threshold = threshold self.false_start = false_start self.mem = mem self.blue_shmem = ipc.SharedMemory(blue_shmem_name, ipc.O_CREAT, mode=0666, size=self.mem) #read_only=True) self.red_shmem = ipc.SharedMemory(red_shmem_name, ipc.O_CREAT, mode=0666, size=self.mem) #read_only=True) self.clear_sem = ipc.Semaphore(clear_sem_name, ipc.O_CREAT, 0666, 0) self.red_map = mmap.mmap(self.red_shmem.fd, self.mem, mmap.MAP_SHARED, mmap.PROT_READ) self.blue_map = mmap.mmap(self.blue_shmem.fd, self.mem, mmap.MAP_SHARED, mmap.PROT_READ) # spawning C program that handles gpio signals Popen(['', '/home/koral/goldio/goldwire', config, str(threshold), str(mem)], executable='/usr/bin/sudo', stdout=PIPE)
def file_contents_ro(fd, stream=False, allow_mmap=True):
    """:return: read-only contents of the file represented by the file descriptor fd
    :param fd: file descriptor opened for reading
    :param stream: if False, random access is provided, otherwise the stream interface
        is provided.
    :param allow_mmap: if True, it's allowed to map the contents into memory, which
        allows large files to be handled and accessed efficiently. The file-descriptor
        will change its position if this is False"""
    try:
        if allow_mmap:
            # supports stream and random access
            try:
                return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
            except EnvironmentError:
                # python 2.4 issue, 0 wants to be the actual size
                return mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_READ)
            # END handle python 2.4
    except OSError:
        pass
    # END exception handling

    # read manually
    contents = os.read(fd, os.fstat(fd).st_size)
    if stream:
        return _RandomAccessBytesIO(contents)
    return contents
def get_sinks(src,video_capture,frame): sink = src.overlay_sink if sink: sink_name = sink.short_id filename = os.path.join(settings.PROJECT_ROOT,"run","sinks",sink_name) overlay_f = open(filename, 'w+b') overlay_f.seek(0, os.SEEK_SET) try: overlay_f.write(frame.tostring() ) except Exception: video_capture.release() frame,video_capture = open_source(src) overlay_f.seek(0, os.SEEK_SET) overlay = mmap.mmap(overlay_f.fileno(), len(frame.tostring()), mmap.MAP_SHARED, prot=mmap.PROT_WRITE) else: overlay = None sink = src.raw_sink if sink: sink_name = sink.short_id filename = os.path.join(settings.PROJECT_ROOT,"run","sinks",sink_name) raw_f = open(filename, 'w+b') raw_f.seek(0, os.SEEK_SET) try: raw_f.write(frame.tostring() ) except Exception: video_capture.release() frame,video_capture = open_source(src) raw_f.seek(0, os.SEEK_SET) raw = mmap.mmap(raw_f.fileno(), len(frame.tostring()), mmap.MAP_SHARED, prot=mmap.PROT_WRITE) else: raw = None return raw,overlay,video_capture
def init(self): with self.lock: if self.inited: return files = os.listdir(self.dir_) for fi in files: if fi == "lock": continue file_path = os.path.join(self.dir_, fi) if not os.path.isfile(file_path) or LEGAL_STORE_FILE_REGEX.match(fi) is None: raise StoreNotSafetyShutdown("Store did not shutdown safety last time.") else: self.legal_files.append(file_path) self.legal_files = sorted(self.legal_files, key=lambda k: int(os.path.basename(k))) if len(self.legal_files) > 0: read_file_handle = self.file_handles[READ_ENTRANCE] = open(self.legal_files[-1], "r+") self.map_handles[READ_ENTRANCE] = mmap.mmap(read_file_handle.fileno(), self.store_file_size) if len(self.legal_files) == 1: self.file_handles[WRITE_ENTRANCE] = self.file_handles[READ_ENTRANCE] self.map_handles[WRITE_ENTRANCE] = self.map_handles[READ_ENTRANCE] else: write_file_handle = self.file_handles[WRITE_ENTRANCE] = open(self.legal_files[0], "r+") self.map_handles[WRITE_ENTRANCE] = mmap.mmap(write_file_handle.fileno(), self.store_file_size) self.inited = True
def identify_binary(self, file_path):
    binary_input_file = open(file_path, "rb")  # Read in binary file as binary ("rb")
    if sys.platform == "win32":  # Check if os is windows
        mapped_file = mmap.mmap(binary_input_file.fileno(), 0, None, mmap.ACCESS_READ)
    else:
        mapped_file = mmap.mmap(binary_input_file.fileno(), 0, mmap.MAP_PRIVATE,
                                mmap.PROT_READ)
    for binary_file_type in self.binary_definitions:
        pattern_match = False
        for field in binary_file_type.markers:
            # Check for byte field in binary file
            if mapped_file.find(codecs.decode(field, "unicode_escape").encode("latin1")) != -1:
                pattern_match = True
            else:
                pattern_match = False
                break
        # All byte fields must be present in order for the file type check to pass
        if pattern_match:
            mapped_file.close()
            binary_input_file.close()
            return binary_file_type.name
    mapped_file.close()
    binary_input_file.close()
    return "unrecognized"
def _read_var_array(self): header = self.fp.read(4) if not header in [ZERO, NC_VARIABLE]: raise ValueError("Unexpected header.") records = 0 dtypes = {'names': [], 'formats': []} rec_vars = [] count = self._unpack_int() for var in range(count): name, dimensions, shape, attributes, type, start, vsize = self._read_var( ) # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html # Note that vsize is the product of the dimension lengths # (omitting the record dimension) and the number of bytes # per value (determined from the type), increased to the # next multiple of 4, for each variable. If a record # variable, this is the amount of space per record. The # netCDF "record size" is calculated as the sum of the # vsize's of all the record variables. # # The vsize field is actually redundant, because its value # may be computed from other information in the header. The # 32-bit vsize field is not large enough to contain the size # of variables that require more than 2^32 - 4 bytes, so # 2^32 - 1 is used in the vsize field for such variables. if shape and shape[0] is None: # record variable rec_vars.append(name) # The netCDF "record size" is calculated as the sum of # the vsize's of all the record variables. self.__dict__['_recsize'] += vsize # Store the position where record arrays start. if records == 0: records = start dtypes['names'].append(name) dtypes['formats'].append(str(shape[1:]) + '>' + type.char) # Handle padding with a virtual variable. if type.char in 'bch': actual_size = reduce(mul, (1, ) + shape[1:]) * type.itemsize padding = -actual_size % 4 if padding: dtypes['names'].append('_padding_%d' % var) dtypes['formats'].append('(%d,)>b' % padding) # Data will be set later. data = None else: # not a record variable # Calculate size to avoid problems with vsize (above) size = reduce(mul, shape, 1) * type.itemsize pos = self.fp.tell() if self.use_mmap: if ALLOCATIONGRANULARITY: pages = start // ALLOCATIONGRANULARITY offset = pages * ALLOCATIONGRANULARITY start = start % ALLOCATIONGRANULARITY mm = mmap(self.fp.fileno(), start + size, access=ACCESS_READ, offset=offset) else: mm = mmap(self.fp.fileno(), start + size, access=ACCESS_READ) data = ndarray.__new__(ndarray, shape, dtype=type, buffer=mm, offset=start, order=0) else: self.fp.seek(start) data = fromstring(self.fp.read(size), type) data.shape = shape self.fp.seek(pos) # Add variable. self.variables[name] = netcdf_variable( data, type, shape, dimensions, attributes, maskandscale=self.maskandscale) if rec_vars: dtypes['formats'] = [ f.replace('()', '').replace(' ', '') for f in dtypes['formats'] ] # Remove padding when only one record variable. if len(rec_vars) == 1: dtypes['names'] = dtypes['names'][:1] dtypes['formats'] = dtypes['formats'][:1] # Build rec array. pos = self.fp.tell() if self.use_mmap: if ALLOCATIONGRANULARITY: pages = records // ALLOCATIONGRANULARITY offset = pages * ALLOCATIONGRANULARITY records = records % ALLOCATIONGRANULARITY mm = mmap(self.fp.fileno(), records + self._recs * self._recsize, access=ACCESS_READ, offset=offset) else: mm = mmap(self.fp.fileno(), records + self._recs * self._recsize, access=ACCESS_READ) rec_array = ndarray.__new__(ndarray, (self._recs, ), dtype=dtypes, buffer=mm, offset=records, order=0) else: self.fp.seek(records) rec_array = fromstring(self.fp.read(self._recs * self._recsize), dtype=dtypes) rec_array.shape = (self._recs, ) self.fp.seek(pos) for var in rec_vars: self.variables[var].__dict__['data'] = rec_array[var]
dest="direct_output", action="store_true", help="use direct I/O for output file (default False)") parser.add_argument("input", help="input filename") parser.add_argument("output", help="output filename") args = parser.parse_args() if args.size is None: args.size = os.path.getsize(args.input) if args.size == 0: parser.error("Cannot determine file size, please specify --size") start = time.time() buf = mmap.mmap(-1, args.blocksize) with closing(buf), \ directio.open(args.input, "r", direct=args.direct_input) as src, \ directio.open(args.output, "w", direct=args.direct_output) as dst: try: dst.truncate(args.size) except EnvironmentError as e: if e.errno != errno.EINVAL: raise pos = 0 while pos < args.size: n = src.readinto(buf) n = min(n, args.size - pos) if ioutil.is_zero(buffer(buf, 0, n)): dst.seek(n, os.SEEK_CUR) else:
#!/usr/bin/env python
# coding=utf-8
import sys
import mmap
import struct

from btc import Block

BITCOIN_CONSTANT = b"\xf9\xbe\xb4\xd9"


def parse_from_file(raw_data):
    length = len(raw_data)
    offset = 0
    while offset < (length - 4):
        if raw_data[offset:offset + 4] == BITCOIN_CONSTANT:
            offset += 4
            size = struct.unpack("<I", raw_data[offset:offset + 4])[0]
            offset += 4 + size
            block = Block().parse_from_hex(raw_data[offset - 8 - size:offset])
            print(block)
        else:
            offset += 1


if __name__ == '__main__':
    blk_file_path = sys.argv[1]
    with open(blk_file_path, 'rb') as f:
        raw_data = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
        parse_from_file(raw_data)
def getFrame(self, type): result = self.lib.getFrame(ctypes.c_void_p(self.frame.ctypes.data)) if (type == 0): # rgb return self.frame[:, :, 0:3] elif (type == 1): # gray return self.frame[:, :, 3] elif (type == 2): # rgb compressed ret, jpeg = cv2.imencode('.jpg', self.frame[:, :, 0:3]) return jpeg.tobytes() elif (type == 3): # gray compressed ret, jpeg = cv2.imencode('.jpg', self.frame[:, :, 3]) return jpeg.tobytes() elif (type == 4): # rgb compressed and tracked tempImage = self.frame[:, :, 0:3] self.clone_img = copy.copy(tempImage) # open dev mem and see to base address f = open("/dev/mem", "r+b") read_mem = mmap.mmap(f.fileno(), 40, offset=0x43c10000) reg = 32 read_mem.seek(reg) self.fromMem = int(struct.unpack('l', read_mem.read(4))[0]) #print(self.fromMem) self.x = 0 self.y = 0 if self.fromMem == 1024: rMin = 80 rMax = 255 gMin = 135 gMax = 190 bMin = 0 bMax = 15 # generate threshold array lower = np.array([bMin, gMin, rMin]) upper = np.array([bMax, gMax, rMax]) #(Ideas derived from https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/) #gaussian blur to reduce high frequency noise (especially applicable when video functionality is implemented) blurred = cv2.GaussianBlur(self.clone_img, (11, 11), 0) #converted to hsv color space hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) #mask colors based on thresholds mask = cv2.inRange(hsv, lower, upper) #remove small blobs mask = cv2.erode(mask, None, iterations=2) mask = cv2.dilate(mask, None, iterations=2) cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) center = None # only proceed if at least one contour was found if len(cnts) > 0: # find the largest contour in the mask, then use # it to compute the minimum enclosing circle and # centroid c = max(cnts, key=cv2.contourArea) ((self.x, self.y), radius) = cv2.minEnclosingCircle(c) M = cv2.moments(c) center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) # only proceed if the radius meets a minimum size if radius > 10: # draw the circle and centroid on the frame, # then update the list of tracked points cv2.circle(self.clone_img, (int(self.x), int(self.y)), int(radius), (0, 255, 255), 2) cv2.circle(self.clone_img, center, 5, (0, 0, 255), -1) # Tell user coordinates: #print("self.x location: " + str(self.x)) #print("self.y location: " + str(self.y)) #cv2.imshow('Result',self.clone_img) elif self.fromMem == 1706: #(Ideas derived from https://realpython.com/face-recognition-with-python/) along with haarcascade_frontalface_default XML doc. 
cascPath = "haarcascade_frontalface_default.xml" # Create the haar cascade faceCascade = cv2.CascadeClassifier(cascPath) gray = cv2.cvtColor(self.clone_img, cv2.COLOR_BGR2HSV) # Detect faces in the image faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE) self.x = 0 self.y = 0 # Draw a rectangle around the faces for (x, y, w, h) in faces: cv2.rectangle(self.clone_img, (x, y), (x + w, y + h), (0, 255, 0), 2) if (math.sqrt(((abs(320 - (x + (w / 2)))) ^ 2) + ((abs(240 - (y + (h / 2)))) ^ 2)) < math.sqrt(((abs(320 - (self.x + (w / 2)))) ^ 2) + ((abs(240 - self.y)) ^ 2))): self.x = x + (w / 2) self.y = y + (h / 2) cv2.line(self.clone_img, (int(self.x), 0), (int(self.x), 480), 5) cv2.line(self.clone_img, (0, int(self.y)), (480, int(self.y)), 5) #cv2.imshow('Result',self.clone_img) self.memCam.seek(0) self.memCam.write(struct.pack('i', int(self.x))) self.memCam.seek(4) self.memCam.write(struct.pack('i', int(self.y))) ret, jpeg = cv2.imencode('.jpg', self.clone_img) return jpeg.tobytes() elif (type == 5): # rgb with on screen display of vitals tempImage = np.ascontiguousarray( self.frame[:, :, 0:3], dtype=np.uint8 ) # must make it contiguous for opencv processing to work self.memADC.seek(0) busVoltage = 'Bus Voltage: ' + str( round((struct.unpack('f', self.memADC.read(4))[0]), 2)) + ' V' self.memIMU.seek(0) imuAccelX = 'X Accel: ' + str( round((struct.unpack('h', self.memIMU.read(2))[0] / 1670.70), 2)) + ' m/sec^2' self.memIMU.seek(4) imuAccelY = 'Y Accel: ' + str( round((struct.unpack('h', self.memIMU.read(2))[0] / -1670.70), 2)) + ' m/sec^2' self.memIMU.seek(8) imuAccelZ = 'Z Accel: ' + str( round((struct.unpack('h', self.memIMU.read(2))[0] / -1670.70), 2)) + ' m/sec^2' font = cv2.FONT_HERSHEY_SIMPLEX cv2.putText(tempImage, imuAccelX, (544, 410), font, .5, (255, 255, 255), 1) cv2.putText(tempImage, imuAccelY, (544, 430), font, .5, (255, 255, 255), 1) cv2.putText(tempImage, imuAccelZ, (544, 450), font, .5, (255, 255, 255), 1) cv2.putText(tempImage, busVoltage, (544, 470), font, .5, (255, 255, 255), 1) cv2.rectangle(tempImage, (540, 392), (750, 478), (0, 255, 0), 2) #return tempImage ret, jpeg = cv2.imencode('.jpg', tempImage) return jpeg.tobytes() else: print "invalid parameter"
def __init__(self, f):
    self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
# end if return all_idx # end def all_lines_with_tag if __name__ == '__main__': fname = 'wftest.000' nline_max = int(1e6) # assume fname has at most 1 million lines ratio_tol = 1e-6 # tolerance for ratio test rel_tol = 1e-3 # tolerance for relative error (0.1%) from mmap import mmap with open(fname, 'r+') as f: mm = mmap(f.fileno(), 0) # end with # 1. grade finite-difference test plocs = all_lines_with_tag(mm, 'For particle', nline_max=nline_max) fsuccess = True # finite-difference test success for ploc in plocs: mm.seek(ploc) success = check_next_particle_grad_lap(mm, rel_tol) fsuccess = fsuccess & success # end for plocs # 2. grade ratio test """ use function designed for: Deriv Numeric Analytic Diff to parse:
ackPackets = []
sendCount = 0
subprocess.Popen("echo '' > ack.log", shell=True, stdout=subprocess.PIPE)
with open("ack.log", "r+b") as file:
    while len(myPackets) != len(ackPackets) and sendCount < 5:
        sendCount += 1
        print("Received " + str(len(ackPackets)) + "/" + str(len(myPackets)) +
              ": " + str(ackPackets))
        for i in range(0, len(myPackets)):
            if i not in ackPackets:
                time.sleep(3)
                print(str(i) + ":" + myPackets[i])  # binary packet
                # Set up Tx on GPIO 17
                rfdevice = RFDevice(17)
                rfdevice.enable_tx()
                rfdevice.tx_code(str(myPackets[i]), None, None)
                rfdevice.cleanup()
                try:
                    mm = mmap.mmap(file.fileno(), 0)
                    line = mm.readline().decode()
                    ind = int(line[22:26], 2)
                    mm.close()
                    if ind not in ackPackets:
                        ackPackets.append(ind)
                except:
                    pass
def __init__(self):
    f = open(os.path.dirname(sys.argv[0]) + '/pwned-passwords-2.0.bin')
    self.mm = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
t = f >> (i * 8) s = s + chr(t & 0xFF) return s # Open memory as file descriptor fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC) # Check if opened sucessfully if fd == -1: print "error opening /dev/mem!" exit() # Map /dev/mem to writable block of memory vb = mmap.mmap(fd, HW_REGS_SPAN, flags=mmap.MAP_SHARED, offset=HW_REGS_BASE) # 7-seg base address pos = (ALT_LWFPGASLVS_OFST + LEDS) & HW_REGS_MASK # Move memory block pointer to above address vb.seek(pos) # Display 4 number on Hex0-Hex3 def display(a, b, c, d): vb.write( cons([ numtable[str(d)], numtable[str(c)], numtable[str(b)], numtable[str(a)] ]))
    # print '---> background'
    # print inDir + '/' + prefix + ana + suffix
    histo = get(file_ana, what, Lfac, colour, 2, 1, 0, 0)
    maximum = 1.5 * histo.GetMaximum()
    plot(ana, 0, 0, 0.01, maximum, 0, xMax[l], label[l], 'evts / bin', 0)  # xMax[l] or maxX
    l = l + 1

##################### controlPlots.html #####################
# open controlPlots.html and search for the entry; if it is there do nothing,
# otherwise produce the line for 'production'
# searchfile = open(control_dir+'controlPlots.html', 'r')  # production.html
# does an entry for production exist? if yes, production.html exists
f = open(control_dir + 'controlPlots.html')
s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
if s.find(production) != -1:
    f.close()
else:
    # entry for production does not exist --> insert it into controlPlots.html
    #! find the right place to insert
    with open(control_dir + 'controlPlots.html', 'r+') as file:
        lines = file.readlines()
        lines.insert(-5, '<li><a href="' + production + '/' + production +
                     '.html" style="text-decoration:none;">' + production +
                     '</a></li>\n')
        # important or not ????
        file.close()
    with open(control_dir + 'controlPlots.html', 'w') as file:
        file.writelines(lines)
        file.close()
def fm_open(self, path):
    # f = open('S:\\Code\\Cpp\\2014\\Release\\cppcsh.bin', 'rb+')
    self.f = open(path, 'rb+')
    self.m = mmap.mmap(self.f.fileno(), 60000)
    self.m.seek(0)
def randomizeRom(romPath, goal, flags=[], patchList=[], banList=None, allowList=None, modifiers=[], adjustTrainerLevels=False, adjustRegularWildLevels=False, adjustSpecialWildLevels=False, trainerLVBoost=0, wildLVBoost=0, requiredItems=[ 'Surf', 'Squirtbottle', 'Flash', 'Mystery Egg', 'Cut', 'Strength', 'Secret Potion', 'Red Scale', 'Whirlpool', 'Card Key', 'Basement Key', 'Waterfall', 'S S Ticket', 'Bicycle', 'Machine Part', 'Lost Item', 'Pass', 'Fly' ], plandoPlacements={}, coreProgress=[ 'Surf', 'Fog Badge', 'Pass', 'S S Ticket', 'Squirtbottle', 'Cut', 'Hive Badge' ], otherSettings={}): requiredItemsCopy = copy.copy(requiredItems) changeListDict = defaultdict(lambda: []) extraTrash = [] for i in modifiers: #print(i) if 'FlagsSet' in i: flags.extend(i['FlagsSet']) if 'Changes' in i: for j in i['Changes']: changeListDict[j['Location']].append(j) if 'AddedItems' in i: for j in i['AddedItems']: if j not in requiredItemsCopy: requiredItemsCopy.append(j) #print(requiredItems) if 'AddedTrash' in i: extraTrash.extend(i['AddedTrash']) if 'NewGamePatches' in i: for j in i['NewGamePatches']: pfile = open(j) ptext = pfile.read() patchList.extend(json.loads(ptext)) print(changeListDict) Zephyr = Badge.Badge() Zephyr.isTrash = False Zephyr.Name = 'Zephyr Badge' Zephyr.Code = 27 Fog = Badge.Badge() Fog.isTrash = False Fog.Name = 'Fog Badge' Fog.Code = 30 Hive = Badge.Badge() Hive.isTrash = False Hive.Name = 'Hive Badge' Hive.Code = 28 Plain = Badge.Badge() Plain.isTrash = False Plain.Name = 'Plain Badge' Plain.Code = 29 Storm = Badge.Badge() Storm.isTrash = False Storm.Name = 'Storm Badge' Storm.Code = 32 Mineral = Badge.Badge() Mineral.isTrash = True Mineral.Name = 'Mineral Badge' Mineral.Code = 31 Glacier = Badge.Badge() Glacier.isTrash = False Glacier.Name = 'Glacier Badge' Glacier.Code = 33 Rising = Badge.Badge() Rising.isTrash = False Rising.Name = 'Rising Badge' Rising.Code = 34 if ('Kanto Mode' in flags): Thunder = Badge.Badge() Thunder.isTrash = True Thunder.Name = 'Thunder Badge' Thunder.Code = 37 Marsh = Badge.Badge() Marsh.isTrash = True Marsh.Name = 'Marsh Badge' Marsh.Code = 40 Rainbow = Badge.Badge() Rainbow.isTrash = True Rainbow.Name = 'Rainbow Badge' Rainbow.Code = 38 Soul = Badge.Badge() Soul.isTrash = True Soul.Name = 'Soul Badge' Soul.Code = 39 Cascade = Badge.Badge() Cascade.isTrash = True Cascade.Name = 'Cascade Badge' Cascade.Code = 36 Boulder = Badge.Badge() Boulder.isTrash = True Boulder.Name = 'Boulder Badge' Boulder.Code = 35 Volcano = Badge.Badge() Volcano.isTrash = True Volcano.Name = 'Volcano Badge' Volcano.Code = 41 Earth = Badge.Badge() Earth.isTrash = True Earth.Name = 'Earth Badge' Earth.Code = 42 BadgeDict = { 'Fog Badge': Fog, 'Zephyr Badge': Zephyr, 'Hive Badge': Hive, 'Plain Badge': Plain, 'Storm Badge': Storm, 'Mineral Badge': Mineral, 'Glacier Badge': Glacier, 'Rising Badge': Rising, 'Thunder Badge': Thunder, 'Marsh Badge': Marsh, 'Rainbow Badge': Rainbow, 'Soul Badge': Soul, 'Cascade Badge': Cascade, 'Boulder Badge': Boulder, 'Volcano Badge': Volcano, 'Earth Badge': Earth } else: BadgeDict = { 'Fog Badge': Fog, 'Zephyr Badge': Zephyr, 'Hive Badge': Hive, 'Plain Badge': Plain, 'Storm Badge': Storm, 'Mineral Badge': Mineral, 'Glacier Badge': Glacier, 'Rising Badge': Rising } result = ['Nothing', 'Here'] while goal not in result[0]: try: res = LoadLocationData.LoadDataFromFolder(".", banList, allowList, changeListDict) progressItems = copy.copy(requiredItemsCopy) #hardcoding key item lookups for now, pass as parameter in future keyItemMap = { 'Surf': 'HM_SURF', 
'Squirtbottle': "SQUIRTBOTTLE", 'Flash': 'HM_FLASH', 'Mystery Egg': 'MYSTERY_EGG', 'Cut': 'HM_CUT', 'Strength': 'HM_STRENGTH', 'Secret Potion': 'SECRETPOTION', 'Red Scale': 'RED_SCALE', 'Whirlpool': 'HM_WHIRLPOOL', 'Card Key': 'CARD_KEY', 'Basement Key': 'BASEMENT_KEY', 'Waterfall': 'HM_WATERFALL', 'S S Ticket': 'S_S_TICKET', 'Machine Part': 'MACHINE_PART', 'Lost Item': 'LOST_ITEM', 'Bicycle': 'BICYCLE', 'Pass': '******', 'Fly': 'HM_FLY', 'Clear Bell': 'CLEAR_BELL', 'Rainbow Wing': 'RAINBOW_WING', 'Pokegear': 'ENGINE_POKEGEAR', 'Radio Card': 'ENGINE_RADIO_CARD', 'Expansion Card': 'ENGINE_EXPN_CARD' } trashItems = [x for x in res[1] if not x in keyItemMap.values() ] #ensure progress items don't sneak into trash list trashItems.extend(extraTrash) if 'TrashItemList' in otherSettings: trashItems = copy.copy(otherSettings['TrashItemList']) if 'ProgressItems' in otherSettings: progressItems = copy.copy(otherSettings['ProgressItems']) print(otherSettings) LocationList = res[0] print(progressItems) print(trashItems) result = RandomizeItems.RandomizeItems( 'None', LocationList, progressItems, trashItems, BadgeDict, inputFlags=flags, plandoPlacements=plandoPlacements, coreProgress=coreProgress) if goal not in result[0]: print('bad run, retrying') except Exception as err: print('Failed with error: ' + str(err) + ' retrying...') traceback.print_exc() print('-------') for j in result[0]: i = result[0][j] if (i.NormalItem is None and i.isItem()): print(i.Name) print('-------') for j in result[0]: i = result[0][j] if (i.NormalItem is not None and not i.isItem()): print(i.Name) yamlfile = open("crystal-speedchoice-label-details.json") yamltext = yamlfile.read() addressLists = json.loads(yamltext) addressData = {} for i in addressLists: addressData[i['label'].split(".")[-1]] = i print(addressData) #newTree = PokemonRandomizer.randomizeTrainers(result[0],85,lambda y: monFun(y,1001,85),True,banMap) #get furthest item location distance maxDist = max(result[2].values()) f = open(romPath, 'r+b') romMap = mmap.mmap(f.fileno(), 0) RandomizerRom.DirectWriteItemLocations(result[0].values(), addressData, romMap, 'Progressive Rods' in flags) if adjustRegularWildLevels: RandomizerRom.WriteWildLevelsToMemory(result[0], result[2], addressData, romMap, wildLVBoost, maxDist) if adjustSpecialWildLevels: RandomizerRom.WriteSpecialWildToMemory(result[0], result[2], addressData, romMap, wildLVBoost, maxDist) if adjustTrainerLevels: RandomizerRom.WriteTrainerDataToMemory(result[0], result[2], addressData, romMap, trainerLVBoost, maxDist) RandomizerRom.ApplyGamePatches(romMap, patchList) #RandomizerRom.WriteTrainerLevels(result[0], result[2],newTree) #RandomizerRom.WriteWildLevels(result[0], result[2],lambda x,y: monFun(x,y,85)) #RandomizerRom.WriteSpecialWildLevels(result[0], result[2],lambda x,y: monFun(x,y,85)) #print(result[2]) #print(result[1]) return result
def test_all(self): # this is a global test, ported from test_mmap.py import mmap from mmap import PAGESIZE import sys import os filename = self.tmpname + "w" f = open(filename, "w+") # write 2 pages worth of data to the file f.write('\0' * PAGESIZE) f.write('foo') f.write('\0' * (PAGESIZE - 3)) f.flush() m = mmap.mmap(f.fileno(), 2 * PAGESIZE) f.close() # sanity checks assert m.find("foo") == PAGESIZE assert len(m) == 2 * PAGESIZE assert m[0] == '\0' assert m[0:3] == '\0\0\0' # modify the file's content m[0] = '3' m[PAGESIZE + 3:PAGESIZE + 3 + 3] = 'bar' # check that the modification worked assert m[0] == '3' assert m[0:3] == '3\0\0' assert m[PAGESIZE - 1:PAGESIZE + 7] == '\0foobar\0' m.flush() # test seeking around m.seek(0, 0) assert m.tell() == 0 m.seek(42, 1) assert m.tell() == 42 m.seek(0, 2) assert m.tell() == len(m) raises(ValueError, m.seek, -1) raises(ValueError, m.seek, 1, 2) raises(ValueError, m.seek, -len(m) - 1, 2) # try resizing map if not (("darwin" in sys.platform) or ("freebsd" in sys.platform)): m.resize(512) assert len(m) == 512 raises(ValueError, m.seek, 513, 0) # check that the underlying file is truncated too f = open(filename) f.seek(0, 2) assert f.tell() == 512 f.close() assert m.size() == 512 m.close() f.close() # test access=ACCESS_READ mapsize = 10 f = open(filename, "wb") f.write("a" * mapsize) f.close() f = open(filename, "rb") m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ) assert m[:] == 'a' * mapsize def f(m): m[:] = 'b' * mapsize raises(TypeError, f, m) def fn(): m[0] = 'b' raises(TypeError, fn) def fn(m): m.seek(0, 0) m.write("abc") raises(TypeError, fn, m) def fn(m): m.seek(0, 0) m.write_byte("d") raises(TypeError, fn, m) if not (("darwin" in sys.platform) or ("freebsd" in sys.platform)): raises(TypeError, m.resize, 2 * mapsize) assert open(filename, "rb").read() == 'a' * mapsize # opening with size too big f = open(filename, "r+b") if not os.name == "nt": # this should work under windows raises(ValueError, mmap.mmap, f.fileno(), mapsize + 1) f.close() # if _MS_WINDOWS: # # repair damage from the resizing test. # f = open(filename, 'r+b') # f.truncate(mapsize) # f.close() m.close() # test access=ACCESS_WRITE" f = open(filename, "r+b") m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE) m.write('c' * mapsize) m.seek(0) data = m.read(mapsize) assert data == 'c' * mapsize m.flush() m.close() f.close() f = open(filename, 'rb') stuff = f.read() f.close() assert stuff == 'c' * mapsize # test access=ACCESS_COPY f = open(filename, "r+b") m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY) m.write('d' * mapsize) m.seek(0) data = m.read(mapsize) assert data == 'd' * mapsize m.flush() assert open(filename, "rb").read() == 'c' * mapsize if not (("darwin" in sys.platform) or ("freebsd" in sys.platform)): raises(TypeError, m.resize, 2 * mapsize) m.close() f.close() # test invalid access f = open(filename, "r+b") raises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4) f.close() # test incompatible parameters if os.name == "posix": f = open(filename, "r+b") raises(ValueError, mmap.mmap, f.fileno(), mapsize, flags=mmap.MAP_PRIVATE, prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE) f.close() # bad file descriptor raises(EnvironmentError, mmap.mmap, -2, 4096) # do a tougher .find() test. SF bug 515943 pointed out that, in 2.2, # searching for data with embedded \0 bytes didn't work. 
f = open(filename, 'w+') data = 'aabaac\x00deef\x00\x00aa\x00' n = len(data) f.write(data) f.flush() m = mmap.mmap(f.fileno(), n) f.close() for start in range(n + 1): for finish in range(start, n + 1): sl = data[start:finish] assert m.find(sl) == data.find(sl) assert m.find(sl + 'x') == -1 m.close() # test mapping of entire file by passing 0 for map length f = open(filename, "w+") f.write(2**16 * 'm') f.close() f = open(filename, "rb+") m = mmap.mmap(f.fileno(), 0) assert len(m) == 2**16 assert m.read(2**16) == 2**16 * "m" m.close() f.close() # make move works everywhere (64-bit format problem earlier) f = open(filename, 'w+') f.write("ABCDEabcde") f.flush() m = mmap.mmap(f.fileno(), 10) m.move(5, 0, 5) assert m.read(10) == "ABCDEABCDE" m.close() f.close()
def unroll(args, func, method, results):
    matriz_aleatoria = matriz_randomica(len(args[0]), random.randint(1, 3))

    # Matrix dimensions
    rows_args = len(args)
    cols_args = len(args[0])
    rows_aleatoria = len(matriz_aleatoria)
    cols_aleatoria = len(matriz_aleatoria[0])

    # ---------- Threads ----------
    # The sum of each element is computed inside a thread
    if method == "thread":
        # List of the created threads
        threads = []
        results = [[0 for i in range(cols_aleatoria)] for j in range(rows_args)]
        for j in range(cols_aleatoria):
            m = []
            for i in range(rows_aleatoria):
                m.append(matriz_aleatoria[i][j])
            for index, arg in enumerate(args):
                threads.append([])
                threads[-1] = threading.Thread(target=func, args=(arg, m, index, j, results))
                threads[-1].start()
                threads[-1].join()
        print("------ Args ------")
        print_matriz(args)
        print("\n------ Aleatoria ------")
        print_matriz(matriz_aleatoria)
        print("\n------ Matriz soma ------")
        print_matriz(results)

    # ---------- PROCESSES ----------
    # Not ready yet; the processes need to communicate, probably through shared
    # memory, so that the results of each row sum of the matrix can be saved.
    # The original process should then print the complete sum of the matrix.
    else:
        global mapped_memory
        mapped_memory = None
        signal.signal(signal.SIGINT, INT_handler)
        # dim(results) = rows_args x cols_aleatoria
        print(rows_args * cols_aleatoria * 4)
        memory = posix_ipc.SharedMemory("results", flags=posix_ipc.O_CREAT,
                                        mode=0o777, size=rows_args * cols_aleatoria * 4)
        mapped_memory = mmap.mmap(memory.fd, memory.size)
        memory.close_fd()
        for j in range(cols_aleatoria):
            m = []
            for i in range(rows_aleatoria):
                m.append(matriz_aleatoria[i][j])
            for index, arg in enumerate(args):
                func(arg, m, index, j, cols_aleatoria)
        time.sleep(0.1)
        print("------ Args ------")
        print_matriz(args)
        print("\n------ Aleatoria ------")
        print_matriz(matriz_aleatoria)
        print("\n------ Matriz multiplicada ------")
        for i in range(rows_args):
            for j in range(cols_aleatoria):
                mapped_memory.seek((i * cols_aleatoria * 4) + (j * 4))
                read_val = struct.unpack('>i', mapped_memory.read(4))
                print(read_val[0], end=", ")
            print()
if (y >= feature['properties']['ymin'] and y < feature['properties']['ymax']) or \ (y == feature['properties']['ymax'] and feature['properties']['id'] in marginX): local_twitter_Count[index][ 1] += 1 # increase the twitter count by 1 tag_list = find_hashtags(local_data['doc']['text']) if len(tag_list) > 0: # if twitter is with hashtags for tag in tag_list: if (tag.lower()) in local_twitter_hashtags[index][1]: local_twitter_hashtags[index][1][tag.lower()] += 1 else: local_twitter_hashtags[index][1][tag.lower()] = 1 f = open(twitter_f_path, 'r', encoding="utf-8") with contextlib.closing(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)) as m: m.readline() line_num = -1 # initialize the line number while True: line_num += 1 line_byte = m.readline().strip() if line_num % comm_size == comm_rank: # parallel line = str(line_byte, encoding='utf-8') if line.strip().rstrip(',') == "]}": # ignore the last line break local_data = json.loads( line.strip().rstrip(',')) # parse Json line process_location(local_data) if m.tell() == m.size(): break
def test_set_item(self): import mmap f = open(self.tmpname + "s", "w+") f.write("foobar") f.flush() m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_READ) def fn(): m[1] = 'a' raises(TypeError, fn) m = mmap.mmap(f.fileno(), 6, access=mmap.ACCESS_WRITE) def fn(): m["foo"] = 'a' raises(TypeError, fn) def fn(): m[-7] = 'a' raises(IndexError, fn) def fn(): m[0] = 'ab' raises((IndexError, ValueError), fn) # IndexError is in CPython, # but doesn't make much sense def fn(): m[1:3] = u'xx' raises((IndexError, TypeError), fn) # IndexError is in CPython, # but doesn't make much sense def fn(): m[1:4] = "zz" raises((IndexError, ValueError), fn) def fn(): m[1:6] = "z" * 6 raises((IndexError, ValueError), fn) def fn(): m[:2] = "z" * 5 raises((IndexError, ValueError), fn) m[1:3] = 'xx' assert m.read(6) == "fxxbar" m[0] = 'x' assert m[0] == 'x' m[-6] = 'y' m[3:6:2] = 'BR' m.seek(0) data = m.read(6) assert data == "yxxBaR" m.close() f.close()
def upload_file(self, path, password=None, bucket_id=None, bucket_name=None, thread_upload_url=None, thread_upload_authorization_token=None, timeout=None): self._authorize_account(timeout) if password: (salt, key, iv) = generate_salt_key_iv(password, 32) in_file = open(path, 'rb') (sha, size) = calc_encryption_sha_and_length(in_file, password, salt, 32, key, iv) in_file.close() fp = Read2Encrypt(path, 'rb', password, salt, 32, key, iv, size=size) else: fp = open(path, 'rb') mm_file_data = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) filename = re.sub('^/', '', path) filename = re.sub('//', '/', filename) # TODO: Figure out URL encoding issue # filename = unicode(filename, "utf-8") sha = hashlib.sha1() with open(path, 'rb') as f: while True: block = f.read(2**10) if not block: break sha.update(block) sha = sha.hexdigest() if thread_upload_url: cur_upload_url = thread_upload_url cur_upload_authorization_token = thread_upload_authorization_token elif not self.upload_url or not self.upload_authorization_token: url = self.get_upload_url(bucket_name=bucket_name, bucket_id=bucket_id) cur_upload_url = url['uploadUrl'] cur_upload_authorization_token = url['authorizationToken'] # fixup filename filename = re.sub('\\\\', '/', path) # Make sure Windows paths are converted. filename = re.sub('^/', '', filename) filename = re.sub('//', '/', filename) #All the whitespaces in the filename should be converted to %20 if " " in filename: filename = filename.replace(" ", "%20") # TODO: Figure out URL encoding issue filename = unicode(filename, "utf-8") headers = { 'Authorization': cur_upload_authorization_token, 'X-Bz-File-Name': filename, 'Content-Type': 'application/octet-stream', # 'Content-Type' : 'b2/x-auto', 'X-Bz-Content-Sha1': sha } try: if password: request = urllib2.Request(cur_upload_url, fp, headers) else: request = urllib2.Request(cur_upload_url, mm_file_data, headers) response = self.__url_open_with_timeout(request, timeout) response_data = json.loads(response.read()) except urllib2.HTTPError, error: print("ERROR: %s" % error.read()) raise
def parse(path: str) -> Optional[Dict[str, List[ExternalLink]]]:
    """
    Parse the lab.ext and return a Dict.

    Keys are names of collision domains and values are Lists of ExternalLink
    attached to that interface.

    Args:
        path (str): The path to the lab.ext file.

    Returns:
        Optional[Dict[str, List[ExternalLink]]]: Keys are names of collision domains
        and values are Lists of ExternalLink attached to that interface.
    """
    lab_ext_path = os.path.join(path, 'lab.ext')
    if not os.path.exists(lab_ext_path):
        return None

    if os.stat(lab_ext_path).st_size == 0:
        logging.warning("lab.ext file is empty. Ignoring...")
        return None

    # Read lab.ext into memory so it is faster.
    try:
        with open(lab_ext_path, 'r') as ext_file:
            ext_mem_file = mmap.mmap(ext_file.fileno(), 0, access=mmap.ACCESS_READ)
    except Exception:
        raise IOError("Cannot open lab.ext file.")

    external_links = {}

    line_number = 1
    line = ext_mem_file.readline().decode('utf-8')
    while line:
        # E.g. A enp9s0
        #      B enp9s0.20
        matches = re.search(r"^(?P<link>\w+)\s+(?P<interface>\w+)(?P<vlan>\.\d+)?$",
                            line.strip())

        if matches:
            link = matches.group("link").strip()
            interface = matches.group("interface").strip()
            vlan = int(matches.group("vlan").strip().replace(".", "")) \
                if matches.group("vlan") else None

            if vlan:
                if not (1 <= vlan <= 4094):
                    raise Exception("[ERROR] In file lab.ext, line %d: "
                                    "VLAN ID must be in range [1, 4094]." % line_number)

            if link not in external_links:
                external_links[link] = []

            external_links[link].append(ExternalLink(interface, vlan))
        elif not line.startswith('#') and line.strip():
            raise Exception("[ERROR] In file lab.ext, line %d malformed." % line_number)

        line_number += 1
        line = ext_mem_file.readline().decode('utf-8')

    return external_links
def remapfile(self):
    import mmap
    size = os.fstat(self.fd).st_size
    self.mm = mmap.mmap(self.fd, size, access=self.access)
    # signal the agent
    os.write(write_fifo, struct.pack('i', 0))
    # wait for the agent
    os.read(read_fifo, 4)

    # read the initial parameters from the agent
    coef, keys = readFromAgent(m)

    # y = m*x + b (slope formula)
    init_param = coef
    num_iterations = 1000

    print("Running...")
    p = gradient_descent_runner(read_fifo, write_fifo, m, w, X, Y, init_param,
                                learning_rate, num_iterations)
    print(p)

    os.write(write_fifo, struct.pack('i', 2))


# main function
if __name__ == "__main__":
    read_fd = os.open('/dev/shm/bh_shm_w_name', os.O_RDWR | os.O_SYNC | os.O_CREAT)
    write_fd = os.open('/dev/shm/bh_shm_r_name', os.O_RDWR | os.O_SYNC | os.O_CREAT)

    read_fifo_path = '/tmp/bh_w_fifo'
    write_fifo_path = '/tmp/bh_r_fifo'

    read_fifo = os.open(read_fifo_path, os.O_SYNC | os.O_CREAT | os.O_RDWR)
    write_fifo = os.open(write_fifo_path, os.O_SYNC | os.O_CREAT | os.O_RDWR)

    m = mmap.mmap(read_fd, 804, access=mmap.ACCESS_READ)
    w = mmap.mmap(write_fd, 804, mmap.MAP_SHARED, mmap.PROT_WRITE)

    run(read_fifo, write_fifo, m, w)
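The two mappings above use the two different mmap constructor forms: `m` uses the cross-platform access= keyword, while `w` uses the Unix-only flags/prot positional form. A hedged, roughly equivalent portable spelling of `w` (on Unix, ACCESS_WRITE corresponds to MAP_SHARED with both read and write permission, so it is slightly more permissive than PROT_WRITE alone):

# Roughly equivalent, portable spelling of the writable mapping above
# (assumes the same write_fd; ACCESS_WRITE also grants read access).
w = mmap.mmap(write_fd, 804, access=mmap.ACCESS_WRITE)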
def __enter__(self):
    self._file_object = open(self.filename, "rb")
    self._mmap = mmap(self._file_object.fileno(), 0, access=ACCESS_READ)
    self._mv = memoryview(self._mmap)
    self._initialize_file_metadata()
    return self
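One gotcha with the context manager above: the memoryview must be released before the mmap is closed, otherwise mmap.close() raises BufferError because a buffer export is still alive. A minimal __exit__ sketch, assuming the attribute names used in the __enter__ above:

def __exit__(self, exc_type, exc_value, traceback):
    # Release the memoryview first; a live export keeps the mmap from closing.
    self._mv.release()
    self._mmap.close()
    self._file_object.close()
    # Returning None (falsy) lets any exception propagate.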
class PureLooseObjectODB(PureRootPathDB, PureObjectDBR, PureObjectDBW):
    """A database which operates on loose object files"""

    # CONFIGURATION
    # chunks in which data will be copied between streams
    stream_chunk_size = chunk_size

    # On windows we need to keep it writable, otherwise it cannot be removed
    # either
    new_objects_mode = 0444
    if os.name == 'nt':
        new_objects_mode = 0644

    def __init__(self, root_path):
        super(PureLooseObjectODB, self).__init__(root_path)
        self._hexsha_to_file = dict()
        # Additional Flags - might be set to 0 after the first failure
        # Depending on the root, this might work for some mounts, for others not,
        # which is why it is per instance
        self._fd_open_flags = getattr(os, 'O_NOATIME', 0)

    #{ Interface
    def object_path(self, hexsha):
        """
        :return: path at which the object with the given hexsha would be stored,
            relative to the database root"""
        return join(hexsha[:2], hexsha[2:])

    def readable_db_object_path(self, hexsha):
        """
        :return: readable object path to the object identified by hexsha
        :raise BadObject: If the object file does not exist"""
        try:
            return self._hexsha_to_file[hexsha]
        except KeyError:
            pass
        # END ignore cache misses

        # try filesystem
        path = self.db_path(self.object_path(hexsha))
        if exists(path):
            self._hexsha_to_file[hexsha] = path
            return path
        # END handle cache
        raise BadObject(hexsha)

    #} END interface

    def _map_loose_object(self, sha):
        """
        :return: memory map of that file to allow random read access
        :raise BadObject: if object could not be located"""
        db_path = self.db_path(self.object_path(bin_to_hex(sha)))
        try:
            return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
        except OSError,e:
            if e.errno != ENOENT:
                # try again without noatime
                try:
                    return file_contents_ro_filepath(db_path)
                except OSError:
                    raise BadObject(sha)
                # didn't work because of our flag, don't try it again
                self._fd_open_flags = 0
            else:
                raise BadObject(sha)
            # END handle error
        # END exception handling
        try:
            return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
        finally:
            os.close(fd)
else: print("Invalid argument for m_map, expected 'MMAP' or 'NO_MMAP' got '" + m_map + "'") exit(1) col_indices = [x for x in getColIndicesToQuery(col_names_file_path, memory_map)] with open(file_path + ".ll", 'rb') as ll_file: line_length = int(ll_file.read().rstrip()) with open(file_path + ".mccl", 'rb') as mccl_file: max_column_coord_length = int(mccl_file.read().rstrip()) with open(file_path + ".cc", 'rb') as cc_file: if memory_map: cc_file = mmap.mmap(cc_file.fileno(), 0, prot=mmap.PROT_READ) with open(file_path, 'rb') as data_file: if memory_map: data_file = mmap.mmap(data_file.fileno(), 0, prot=mmap.PROT_READ) with open(out_file_path, 'wb') as out_file: row_indices = range(num_rows + 1) if memory_map: col_coords = list(parse_data_coords(col_indices, cc_file, max_column_coord_length, line_length)) else: col_coords = list(parse_data_coords_seek(col_indices, file_path + ".cc", max_column_coord_length, line_length)) out_lines = [] if memory_map: for row_index in row_indices:
def load(self):
    """Load image data based on tile list"""

    if self.tile is None:
        raise OSError("cannot load this image")

    pixel = Image.Image.load(self)
    if not self.tile:
        return pixel

    self.map = None
    use_mmap = self.filename and len(self.tile) == 1
    # As of pypy 2.1.0, memory mapping was failing here.
    use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")

    readonly = 0

    # look for read/seek overrides
    try:
        read = self.load_read
        # don't use mmap if there are custom read/seek functions
        use_mmap = False
    except AttributeError:
        read = self.fp.read

    try:
        seek = self.load_seek
        use_mmap = False
    except AttributeError:
        seek = self.fp.seek

    if use_mmap:
        # try memory mapping
        decoder_name, extents, offset, args = self.tile[0]
        if (decoder_name == "raw" and len(args) >= 3
                and args[0] == self.mode
                and args[0] in Image._MAPMODES):
            try:
                if hasattr(Image.core, "map"):
                    # use built-in mapper  WIN32 only
                    self.map = Image.core.map(self.filename)
                    self.map.seek(offset)
                    self.im = self.map.readimage(self.mode, self.size, args[1], args[2])
                else:
                    # use mmap, if possible
                    import mmap

                    with open(self.filename) as fp:
                        self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
                    self.im = Image.core.map_buffer(
                        self.map, self.size, decoder_name, offset, args
                    )
                readonly = 1
                # After trashing self.im,
                # we might need to reload the palette data.
                if self.palette:
                    self.palette.dirty = 1
            except (AttributeError, OSError, ImportError):
                self.map = None

    self.load_prepare()
    err_code = -3  # initialize to unknown error
    if not self.map:
        # sort tiles in file order
        self.tile.sort(key=_tilesort)

        try:
            # FIXME: This is a hack to handle TIFF's JpegTables tag.
            prefix = self.tile_prefix
        except AttributeError:
            prefix = b""

        for decoder_name, extents, offset, args in self.tile:
            decoder = Image._getdecoder(self.mode, decoder_name, args, self.decoderconfig)
            try:
                seek(offset)
                decoder.setimage(self.im, extents)
                if decoder.pulls_fd:
                    decoder.setfd(self.fp)
                    status, err_code = decoder.decode(b"")
                else:
                    b = prefix
                    while True:
                        try:
                            s = read(self.decodermaxblock)
                        except (IndexError, struct.error) as e:
                            # truncated png/gif
                            if LOAD_TRUNCATED_IMAGES:
                                break
                            else:
                                raise OSError("image file is truncated") from e

                        if not s:  # truncated jpeg
                            if LOAD_TRUNCATED_IMAGES:
                                break
                            else:
                                raise OSError(
                                    "image file is truncated "
                                    f"({len(b)} bytes not processed)")

                        b = b + s
                        n, err_code = decoder.decode(b)
                        if n < 0:
                            break
                        b = b[n:]
            finally:
                # Need to cleanup here to prevent leaks
                decoder.cleanup()

    self.tile = []
    self.readonly = readonly

    self.load_end()

    if self._exclusive_fp and self._close_exclusive_fp_after_loading:
        self.fp.close()
    self.fp = None

    if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
        # still raised if decoder fails to return anything
        raise_oserror(err_code)

    return Image.Image.load(self)
def __init__(self, path):
    super().__init__(path)
    self._map_obj = mmap.mmap(self.path.fileno(), 0, prot=mmap.PROT_READ)
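Note that the prot=mmap.PROT_READ keyword above exists only on Unix; the Windows constructor takes tagname/access instead. A portable read-only mapping can be sketched with the cross-platform access= keyword (fileobj here stands in for whatever open file object self.path refers to in the snippet):

import mmap

def map_readonly(fileobj):
    # access=ACCESS_READ works on both Unix and Windows; flags/prot are Unix-only.
    return mmap.mmap(fileobj.fileno(), 0, access=mmap.ACCESS_READ)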
def __init__(self, filename, encoding=None, password=None, cached=True, check=False,
             current_tablename=None, date_fieldname=None, time_fieldname=None,
             decryptor_class=TpsDecryptor):
    self.filename = filename
    self.encoding = encoding
    self.password = password
    self.cached = cached
    self.check = check
    self.current_table_number = None
    # Name part before .tps
    self.name = os.path.basename(filename)
    self.name = text_type(os.path.splitext(self.name)[0]).lower()

    if date_fieldname is not None:
        self.date_fieldname = date_fieldname
    else:
        self.date_fieldname = []
    if time_fieldname is not None:
        self.time_fieldname = time_fieldname
    else:
        self.time_fieldname = []

    self.cache_pages = {}

    if not os.path.isfile(self.filename):
        raise FileNotFoundError(self.filename)

    self.file_size = os.path.getsize(self.filename)

    # Check file size
    if check:
        if self.file_size & 0x3F != 0:
            # TODO check translate
            warn('File size is not a multiple of 64 bytes.', RuntimeWarning)

    with open(self.filename, mode='r+b') as tpsfile:
        self.tps_file = mmap.mmap(tpsfile.fileno(), 0)

    self.decryptor = decryptor_class(self.tps_file, self.password)

    try:
        # TPS file header
        header = Struct('header',
                        ULInt32('offset'),
                        ULInt16('size'),
                        ULInt32('file_size'),
                        ULInt32('allocated_file_size'),
                        Const(Bytes('top_speed_mark', 6), b'tOpS\x00\x00'),
                        UBInt32('last_issued_row'),
                        ULInt32('change_count'),
                        ULInt32('page_root_ref'),
                        Array(lambda ctx: (ctx['size'] - 0x20) / 2 / 4,
                              ULInt32('block_start_ref')),
                        Array(lambda ctx: (ctx['size'] - 0x20) / 2 / 4,
                              ULInt32('block_end_ref')))

        self.header = header.parse(self.read(0x200))
        self.pages = TpsPagesList(self, self.header.page_root_ref, check=self.check)
        self.tables = TpsTablesList(self, encoding=self.encoding, check=self.check)
        self.set_current_table(current_tablename)
    except adapters.ConstError:
        print('Bad cryptographic keys.')
def run(self):
    # Set up shared memory map (offset makes it so bot only writes to its own input!)
    # and map to buffer
    filename = ""
    buff = mmap.mmap(-1, ctypes.sizeof(bi.GameInputPacket), INPUT_SHARED_MEMORY_TAG)
    bot_input = bi.GameInputPacket.from_buffer(buff)
    player_input = bot_input.sPlayerInput[self.index]
    player_input_lock = (ctypes.c_long).from_address(ctypes.addressof(player_input))

    # Set up shared memory for game data
    game_data_shared_memory = mmap.mmap(-1, ctypes.sizeof(gd.GameTickPacketWithLock),
                                        OUTPUT_SHARED_MEMORY_TAG)
    bot_output = gd.GameTickPacketWithLock.from_buffer(game_data_shared_memory)
    lock = ctypes.c_long(0)
    # We want to do a deep copy for game inputs so people don't mess with em
    game_tick_packet = gd.GameTickPacket()

    # Create Ratelimiter
    r = rate_limiter.RateLimiter(GAME_TICK_PACKET_REFRESHES_PER_SECOND)
    last_tick_game_time = None  # What the tick time of the last observed tick was
    last_call_real_time = datetime.now()  # When we last called the Agent

    # Find car with same name and assign index
    for i in range(MAX_CARS):
        if str(bot_output.gamecars[i].wName) == self.name:
            self.index = i
            continue

    # Get bot module
    agent_module = importlib.import_module(self.module_name)
    # Create bot from module
    agent = self.load_agent(agent_module)

    if hasattr(agent, 'create_model_hash'):
        self.model_hash = agent.create_model_hash()
    else:
        self.model_hash = 0

    self.server_manager.set_model_hash(self.model_hash)
    last_module_modification_time = os.stat(agent_module.__file__).st_mtime

    if hasattr(agent, 'is_evaluating'):
        self.is_eval = agent.is_evaluating
        self.server_manager.set_is_eval(self.is_eval)

    if self.save_data:
        filename = self.create_file_name()
        print('creating file ' + filename)
        self.create_new_file(filename)
    old_time = 0
    counter = 0

    last_module_modification_time = os.stat(agent_module.__file__).st_mtime

    # Run until main process tells to stop
    while not self.terminateEvent.is_set():
        before = datetime.now()
        before2 = time.time()

        # Read from game data shared memory
        game_data_shared_memory.seek(0)  # Move to beginning of shared memory
        # dll uses InterlockedExchange so this read will return the correct value!
        ctypes.memmove(ctypes.addressof(lock),
                       game_data_shared_memory.read(ctypes.sizeof(lock)),
                       ctypes.sizeof(lock))

        if lock.value != REFRESH_IN_PROGRESS:
            game_data_shared_memory.seek(4, os.SEEK_CUR)  # Move 4 bytes past error code
            # copy shared memory into struct
            ctypes.memmove(ctypes.addressof(game_tick_packet),
                           game_data_shared_memory.read(ctypes.sizeof(gd.GameTickPacket)),
                           ctypes.sizeof(gd.GameTickPacket))

        if game_tick_packet.gameInfo.bMatchEnded:
            print('\n\n\n\n Match has ended so ending bot loop\n\n\n\n\n')
            break

        controller_input = None
        # Run the Agent only if the gameInfo has updated.
        tick_game_time = game_tick_packet.gameInfo.TimeSeconds
        should_call_while_paused = datetime.now() - last_call_real_time >= MAX_AGENT_CALL_PERIOD
        if tick_game_time != last_tick_game_time or should_call_while_paused:
            last_tick_game_time = tick_game_time
            last_call_real_time = datetime.now()

            try:
                # Reload the Agent if it has been modified.
                new_module_modification_time = os.stat(agent_module.__file__).st_mtime
                if new_module_modification_time != last_module_modification_time:
                    last_module_modification_time = new_module_modification_time
                    print('Reloading Agent: ' + agent_module.__file__)
                    importlib.reload(agent_module)
                    old_agent = agent
                    agent = self.load_agent(agent_module)
                    # Retire after the replacement initialized properly.
                    if hasattr(old_agent, 'retire'):
                        old_agent.retire()

                # Call agent
                controller_input = agent.get_output_vector(game_tick_packet)
                if not controller_input:
                    raise Exception('Agent "{}" did not return a player_input tuple.'
                                    .format(agent_module.__file__))

                # Write all player inputs
                player_input.fThrottle = controller_input[0]
                player_input.fSteer = controller_input[1]
                player_input.fPitch = controller_input[2]
                player_input.fYaw = controller_input[3]
                player_input.fRoll = controller_input[4]
                player_input.bJump = controller_input[5]
                player_input.bBoost = controller_input[6]
                player_input.bHandbrake = controller_input[7]
            except Exception as e:
                traceback.print_exc()

            # Workaround for windows streams behaving weirdly when not in command prompt
            sys.stdout.flush()
            sys.stderr.flush()

            current_time = game_tick_packet.gameInfo.TimeSeconds
            if self.save_data and game_tick_packet.gameInfo.bRoundActive and \
                    not old_time == current_time and not current_time == -10:
                np_input = self.input_converter.create_input_array(
                    game_tick_packet, passed_time=current_time - old_time)
                np_output = np.array(controller_input, dtype=np.float32)
                self.input_array = np.append(self.input_array, np_input)
                self.output_array = np.append(self.output_array, np_output)
                if self.frames % self.batch_size == 0 and not self.frames == 0:
                    print('writing big array',
                          self.frames % (self.batch_size * self.upload_size))
                    compressor.write_array_to_file(self.game_file, self.input_array)
                    compressor.write_array_to_file(self.game_file, self.output_array)
                    self.input_array = np.array([])
                    self.output_array = np.array([])
                if self.frames % (self.batch_size * self.upload_size) == 0 and \
                        not self.frames == 0:
                    print('adding new file and uploading')
                    self.file_number += 1
                    self.game_file.close()
                    print('creating file ' + filename)
                    self.maybe_compress_and_upload(filename)
                    filename = self.create_file_name()
                    self.create_new_file(filename)
                    self.maybe_delete(self.file_number - 3)
                if self.frames % (self.batch_size * self.upload_size * self.retry_size) == 0 \
                        and not self.frames == 0:
                    try:
                        self.server_manager.retry_files()
                    except Exception:
                        print('failed to retry uploading files')
                self.frames += 1
                old_time = current_time

        # Ratelimit here
        after = datetime.now()
        after2 = time.time()
        # cant ever drop below 40 frames
        if after2 - before2 > 0.025:
            print('Too slow for ' + self.name + ': ' + str(after2 - before2) +
                  ' frames since slowdown ' + str(counter))
            counter = 0
        else:
            counter += 1

        r.acquire(after - before)

    if hasattr(agent, 'retire'):
        agent.retire()

    # If terminated, send callback
    print("something ended closing file")
    if self.save_data:
        self.maybe_compress_and_upload(filename)
        self.server_manager.retry_files()

    print('done with bot')
    self.callbackEvent.set()
# -*- coding:utf-8 -*-
# Example 2-13. Using the mmap module
# File: mmap-example-1.py

import mmap
import os

filename = "samples/sample.txt"

file = open(filename, "r+")
size = os.path.getsize(filename)

data = mmap.mmap(file.fileno(), size)

# basics
print data
print len(data), size

# use slicing to read from the file
print repr(data[:10]), repr(data[:10])

# or use the standard file interface
print repr(data.read(10)), repr(data.read(10))
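The example above is Python 2 (print statements, str slices). A rough Python 3 equivalent, assuming the same samples/sample.txt file exists, might look like this:

import mmap
import os

filename = "samples/sample.txt"

with open(filename, "r+b") as fh:  # mmap needs a binary-mode file object
    size = os.path.getsize(filename)
    data = mmap.mmap(fh.fileno(), size)

    # basics
    print(data)
    print(len(data), size)

    # slicing returns bytes in Python 3
    print(repr(data[:10]))

    # or use the standard file interface
    print(repr(data.read(10)))

    data.close()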
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
            shape=None, order='C'):
    # Import here to minimize 'import numpy' overhead
    import mmap
    import os.path
    try:
        mode = mode_equivalents[mode]
    except KeyError:
        if mode not in valid_filemodes:
            raise ValueError("mode must be one of %s" %
                             (valid_filemodes + list(mode_equivalents.keys())))

    if mode == 'w+' and shape is None:
        raise ValueError("shape must be given")

    if hasattr(filename, 'read'):
        f_ctx = contextlib_nullcontext(filename)
    else:
        f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')

    with f_ctx as fid:
        fid.seek(0, 2)
        flen = fid.tell()
        descr = dtypedescr(dtype)
        _dbytes = descr.itemsize

        if shape is None:
            bytes = flen - offset
            if bytes % _dbytes:
                raise ValueError("Size of available data is not a "
                                 "multiple of the data-type size.")
            size = bytes // _dbytes
            shape = (size,)
        else:
            if not isinstance(shape, tuple):
                shape = (shape,)
            size = np.intp(1)  # avoid default choice of np.int_, which might overflow
            for k in shape:
                size *= k

        bytes = long(offset + size*_dbytes)

        if mode in ('w+', 'r+') and flen < bytes:
            fid.seek(bytes - 1, 0)
            fid.write(b'\0')
            fid.flush()

        if mode == 'c':
            acc = mmap.ACCESS_COPY
        elif mode == 'r':
            acc = mmap.ACCESS_READ
        else:
            acc = mmap.ACCESS_WRITE

        start = offset - offset % mmap.ALLOCATIONGRANULARITY
        bytes -= start
        array_offset = offset - start
        mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)

        self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
                               offset=array_offset, order=order)
        self._mmap = mm
        self.offset = offset
        self.mode = mode

        if is_pathlib_path(filename):
            # special case - if we were constructed with a pathlib.path,
            # then filename is a path object, not a string
            self.filename = filename.resolve()
        elif hasattr(fid, "name") and isinstance(fid.name, basestring):
            # py3 returns int for TemporaryFile().name
            self.filename = os.path.abspath(fid.name)
        # same as memmap copies (e.g. memmap + 1)
        else:
            self.filename = None

    return self
def writeToWordlist(content, wordlist):
    f = open(wordlist, 'a+')
    s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    filename = content.split('/')[-1]
    if s.find(bytes(filename, 'utf-8')) == -1:
        f.write(filename + '\n')
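One caveat with the snippet above: mmap.mmap(fd, 0) raises ValueError for a zero-length file, so the call fails the first time the wordlist is created, and neither the map nor the file is ever closed. A minimal sketch that guards for both (write_to_wordlist is an illustrative name, not the original function):

import mmap
import os

def write_to_wordlist(content, wordlist):
    filename = content.split('/')[-1]
    with open(wordlist, 'a+', encoding='utf-8') as f:
        found = False
        if os.path.getsize(wordlist) > 0:  # zero-length files cannot be mmapped
            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as s:
                found = s.find(filename.encode('utf-8')) != -1
        if not found:
            f.write(filename + '\n')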