def read_forever(self):
    """
    This function reads the log file one line at a time, reopening it if
    it changes (due to logrotate, for example). It loops until the program
    ends or is explicitly told to stop reading the log file.
    """
    try:
        self.mc_log_fh = open(self.mc_log, 'r')
        self.curinode = os.fstat(self.mc_log_fh.fileno()).st_ino
        while self.log_read:
            while self.log_read:
                self.buf = self.mc_log_fh.readline()
                if self.buf == "":
                    break
                self.handle_message(self.buf, False)
            try:
                if os.stat(self.mc_log).st_ino != self.curinode:
                    self.mc_new_fh = open(self.mc_log, "r")
                    self.mc_log_fh.close()
                    self.mc_log_fh = self.mc_new_fh  # was self.mc_new_fg: typo
                    self.curinode = os.fstat(self.mc_log_fh.fileno()).st_ino
                    continue  # don't bother sleeping since there is a new log file
            except IOError:
                pass
            time.sleep(0.1)
    except:
        self.log.error('Craftoria: unable to open or read log file %s: %s',
                       sys.exc_type, sys.exc_value)
def _actually_load_xml(self, fobj, fn, this_id, load_backup):
    tmpobj = None
    if self._load_timestamp.get(this_id, 0) != os.fstat(fobj.fileno()).st_ctime:
        import time
        b4 = time.time()
        tmpobj, errs = self.from_file(fobj)
        a4 = time.time()
        logger.debug("Loading XML file for ID: %s took %s sec" % (this_id, str(a4 - b4)))

        has_children = (self.sub_split is not None) and \
                       (self.sub_split in tmpobj.getNodeData()) and \
                       len(tmpobj.getNodeAttribute(self.sub_split)) == 0
        if this_id in self.objects:
            self._must_actually_load_xml(fobj, fn, this_id, load_backup,
                                         has_children, tmpobj, errs)
        else:
            self._internal_setitem__(this_id, tmpobj)
            if hasattr(self.objects[this_id], self.sub_split):
                sub_attr = getattr(self.objects[this_id], self.sub_split)
                if sub_attr is not None and hasattr(sub_attr, '_setParent'):
                    sub_attr._setParent(self.objects[this_id])

        self._load_timestamp[this_id] = os.fstat(fobj.fileno()).st_ctime
    else:
        logger.debug("Didn't Load Job ID: %s" % str(this_id))

    logger.debug("Finished Loading XML")
def test_contextmanager():
    fn = None
    with Image.open("Images/lena.gif") as im:
        fn = im.fp.fileno()
        assert_no_exception(lambda: os.fstat(fn))
    # the descriptor is closed once the with-block exits
    assert_exception(OSError, lambda: os.fstat(fn))
def get_data_file(args):
    if args.file:
        return open(args.file, 'rb')
    else:
        # distinguish cases where:
        # (1) stdin is not valid (as in cron jobs):
        #     glance ... <&-
        # (2) image data is provided through standard input:
        #     glance ... < /tmp/file or cat /tmp/file | glance ...
        # (3) no image data provided:
        #     glance ...
        try:
            os.fstat(0)
        except OSError:
            # (1) stdin is not valid (closed...)
            return None

        if not sys.stdin.isatty():
            # (2) image data is provided through standard input
            image = sys.stdin
            if hasattr(sys.stdin, 'buffer'):
                image = sys.stdin.buffer
            if msvcrt:
                msvcrt.setmode(image.fileno(), os.O_BINARY)
            return image
        else:
            # (3) no image data provided
            return None
def fix_savedata(dir):
    if not os.path.isdir(dir) or not os.path.isfile(dir + "/SYS.BIN"):
        ErrorMessageBox("Directory error")

    import mmap
    fd = os.open(dir + "/SYS.BIN", os.O_RDWR)
    buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
    if buf[0:8] != "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF":
        print "Bad savedata or not decrypted. SYS.BIN"
        ErrorMessageBox("Savedata error")

    # each save slot is 0x1258 bytes; step through all 100 slots
    # (the original looped over every byte offset and bumped pos inside a
    # for-loop, which has no effect; stepping by the slot size is the intent)
    for pos in range(0x269480, 0x269480 + 0x1258 * 100, 0x1258):
        if buf[pos:pos + 4] == "\0\0\0\2":
            buf[pos + 0x18:pos + 0x58] = "\0\0\0\0" * 0x10
    os.close(fd)
    print 'Fix SYS.BIN.'

    import fnmatch
    zstr = "\0\0\0\0" * ((0x8A358 - 0x46358) / 4)
    for directory, subdirectories, files in os.walk(dir):
        for file in files:
            if fnmatch.fnmatch(file, 'SAVE???.BIN'):
                fd = os.open(os.path.join(directory, file), os.O_RDWR)
                buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
                if buf[0:4] != "\0\0\0\2":
                    print "Bad savedata or not decrypted. %s" % file
                    ErrorMessageBox("Savedata error or not decrypted")
                buf[0x18:0x58] = "\0\0\0\0" * 0x10
                buf[0x46358:0x8A358] = zstr
                os.close(fd)
                print 'Fix %s.' % (file)

    windll.user32.MessageBoxA(None, "Savedata fix complete!", EXE_TITLE, 0)
def read_module_files(module_data, limit=None, **addl_props):
    """
    Read the files belonging to a module from disk and return their data
    as a dictionary.
    """
    # the original repeated the same block for the 'te', 'if' and 'fc'
    # files; a loop over the three keys is behavior-identical
    files = {}
    for key in ('te', 'if', 'fc'):
        data_key = key + '_file'
        if data_key in module_data:
            with open(module_data[data_key]) as fin:
                info = os.fstat(fin.fileno())
                handle = itertools.islice(fin, limit)
                files[key] = {'text': "".join(handle)}
                files[key].update(**addl_props)
                files[key]['size'] = info.st_size
    return files
def file_contents_ro(fd, stream=False, allow_mmap=True):
    """:return: read-only contents of the file represented by the file descriptor fd

    :param fd: file descriptor opened for reading
    :param stream: if False, random access is provided, otherwise the stream
        interface is provided
    :param allow_mmap: if True, it is allowed to map the contents into memory,
        which allows large files to be handled and accessed efficiently.
        The file-descriptor will change its position if this is False"""
    try:
        if allow_mmap:
            # supports stream and random access
            try:
                return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
            except EnvironmentError:
                # python 2.4 issue, 0 wants to be the actual size
                return mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_READ)
            # END handle python 2.4
    except OSError:
        pass
    # END exception handling

    # read manually
    contents = os.read(fd, os.fstat(fd).st_size)
    if stream:
        return _RandomAccessBytesIO(contents)
    return contents
def __init__(self, fd, args=[], timeout=30, maxread=2000,
             searchwindowsize=None, logfile=None):
    """This takes a file descriptor (an int) or an object that supports the
    fileno() method (returning an int). All Python file-like objects
    support fileno().
    """
    ### TODO: Add better handling of trying to use fdspawn in place of spawn
    ### TODO: (overload to allow fdspawn to also handle commands as spawn does.

    if type(fd) != type(0) and hasattr(fd, 'fileno'):
        fd = fd.fileno()

    if type(fd) != type(0):
        raise ExceptionPexpect('The fd argument is not an int. If this is a '
                               'command string then maybe you want to use pexpect.spawn.')

    try:  # make sure fd is a valid file descriptor
        os.fstat(fd)
    except OSError:
        raise ExceptionPexpect('The fd argument is not a valid file descriptor.')

    self.args = None
    self.command = None
    spawn.__init__(self, None, args, timeout, maxread, searchwindowsize, logfile)
    self.child_fd = fd
    self.own_fd = False
    self.closed = False
    self.name = '<file descriptor %d>' % fd
def followTail(self, fileobj=None, fstat=None):
    if fileobj is None:
        fileobj = open(self.filename)
        if self.seekend:
            fileobj.seek(0, 2)

    line = fileobj.read()
    if line:
        self.dataReceived(line)

    if fstat is None:
        fstat = os.fstat(fileobj.fileno())

    try:
        stat = os.stat(self.filename)
    except:
        stat = fstat

    if self.fileIdentity(stat) != self.fileIdentity(fstat):
        fileobj = open(self.filename)
        fstat = os.fstat(fileobj.fileno())
        self.fileReset()

    if self.keeprunning:
        reactor.callLater(self.delay, lambda: self.followTail(fileobj, fstat))
def test_object_run_once_multi_devices(self):
    self.auditor = auditor.AuditorWorker(self.conf, self.logger)
    timestamp = str(normalize_timestamp(time.time()))
    pre_quarantines = self.auditor.quarantines
    data = '0' * 10
    etag = md5()
    with self.disk_file.writer() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer.fd).st_size),
        }
        writer.put(metadata)
    self.auditor.audit_all_objects()
    self.disk_file = DiskFile(self.devices, 'sdb', '0', 'a', 'c',
                              'ob', self.logger)
    data = '1' * 10
    etag = md5()
    with self.disk_file.writer() as writer:
        writer.write(data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(writer.fd).st_size),
        }
        writer.put(metadata)
        os.write(writer.fd, 'extra_data')
    self.auditor.audit_all_objects()
    self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def test_handler_existing_file(self):
    handler = FakeHandler([])
    with tempfile.NamedTemporaryFile(dir="") as f_obj_a:
        with handler.parent_open(f_obj_a.name) as f_obj_b:
            self.assertEqual(os.fstat(f_obj_a.fileno()).st_ino,
                             os.fstat(f_obj_b.fileno()).st_ino)
            self.assertEqual(f_obj_a.name, f_obj_b.name)
def __init__(self, fd, args=None, timeout=30, maxread=2000,
             searchwindowsize=None, logfile=None, encoding=None,
             codec_errors='strict'):
    '''This takes a file descriptor (an int) or an object that supports the
    fileno() method (returning an int). All Python file-like objects
    support fileno().
    '''
    if type(fd) != type(0) and hasattr(fd, 'fileno'):
        fd = fd.fileno()

    if type(fd) != type(0):
        raise ExceptionPexpect('The fd argument is not an int. If this is a '
                               'command string then maybe you want to use pexpect.spawn.')

    try:  # make sure fd is a valid file descriptor
        os.fstat(fd)
    except OSError:
        raise ExceptionPexpect('The fd argument is not a valid file descriptor.')

    self.args = None
    self.command = None
    SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
                       encoding=encoding, codec_errors=codec_errors)
    self.child_fd = fd
    self.own_fd = False
    self.closed = False
    self.name = '<file descriptor %d>' % fd
def test_contextmanager(self):
    fn = None
    with Image.open("Tests/images/hopper.gif") as im:
        fn = im.fp.fileno()
        os.fstat(fn)
    self.assertRaises(OSError, os.fstat, fn)
def _open_next(self, fd):
    current = os.path.join(self.dir, 'current')
    current_fd = _open_nonblock(current)

    pat = os.path.join(self.dir, '@*.[su]')
    files = glob.glob(pat)
    files.sort()
    files.append(current)

    inode = os.fstat(fd).st_ino
    os.close(fd)

    for idx, f in enumerate(files):
        if os.stat(f).st_ino == inode:
            break
    else:
        log.warn("multilog: can't find before file. (inode=%d)", inode)
        return os.fstat(current_fd).st_ino, current_fd

    idx += 1
    if len(files) == idx:
        log.warn("multilog: rotate current => current?")
        return os.fstat(current_fd).st_ino, current_fd

    next_file = files[idx]
    if next_file == current:
        return os.fstat(current_fd).st_ino, current_fd

    os.close(current_fd)
    return os.stat(next_file).st_ino, _open_nonblock(next_file)
def test_object_run_fast_track_non_zero(self):
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    data = '0' * 1024
    etag = md5()
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': str(os.fstat(fd).st_size),
            'Original-Content-Length': str(os.fstat(fd).st_size)
        }
        self.disk_file.put(fd, tmppath, metadata)
        etag = md5()
        etag.update('1' + '0' * 1023)
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)

    quarantine_path = os.path.join(self.devices, 'sda',
                                   'quarantined', 'objects')
    self.auditor.run_once(zero_byte_fps=50)
    self.assertFalse(os.path.isdir(quarantine_path))
    self.auditor.run_once()
    self.assertTrue(os.path.isdir(quarantine_path))
def actionGetFile(self, params):
    site = self.sites.get(params["site"])
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        return False
    try:
        file_path = site.storage.getPath(params["inner_path"])
        if config.debug_socket:
            self.log.debug("Opening file: %s" % file_path)
        with StreamingMsgpack.FilePart(file_path, "rb") as file:
            file.seek(params["location"])
            file.read_bytes = FILE_BUFF
            back = {
                "body": file,
                "size": os.fstat(file.fileno()).st_size,
                "location": min(file.tell() + FILE_BUFF,
                                os.fstat(file.fileno()).st_size)
            }
            if config.debug_socket:
                self.log.debug(
                    "Sending file %s from position %s to %s" %
                    (file_path, params["location"], back["location"])
                )
            self.response(back, streaming=True)
        if config.debug_socket:
            self.log.debug("File %s sent" % file_path)

        # Add peer to site if not added before
        connected_peer = site.addPeer(self.connection.ip, self.connection.port)
        if connected_peer:  # Just added
            connected_peer.connect(self.connection)  # Assign current connection to peer
    except Exception, err:
        self.log.debug("GetFile read error: %s" % Debug.formatException(err))
        self.response({"error": "File read error: %s" % Debug.formatException(err)})
        return False
def actionStreamFile(self, params):
    site = self.sites.get(params["site"])
    if not site or not site.settings["serving"]:  # Site unknown or not serving
        self.response({"error": "Unknown site"})
        return False
    try:
        if config.debug_socket:
            self.log.debug("Opening file: %s" % params["inner_path"])
        with site.storage.open(params["inner_path"]) as file:
            file.seek(params["location"])
            stream_bytes = min(FILE_BUFF,
                               os.fstat(file.fileno()).st_size - params["location"])
            back = {
                "size": os.fstat(file.fileno()).st_size,
                "location": min(file.tell() + FILE_BUFF,
                                os.fstat(file.fileno()).st_size),
                "stream_bytes": stream_bytes
            }
            if config.debug_socket:
                self.log.debug(
                    "Sending file %s from position %s to %s" %
                    (params["inner_path"], params["location"], back["location"])
                )
            self.response(back)
            self.sendRawfile(file, read_bytes=FILE_BUFF)
        if config.debug_socket:
            self.log.debug("File %s sent" % params["inner_path"])

        # Add peer to site if not added before
        connected_peer = site.addPeer(self.connection.ip, self.connection.port)
        if connected_peer:  # Just added
            connected_peer.connect(self.connection)  # Assign current connection to peer
    except Exception, err:
        self.log.debug("GetFile read error: %s" % Debug.formatException(err))
        self.response({"error": "File read error: %s" % Debug.formatException(err)})
        return False
def test_object_audit_diff_data(self):
    self.auditor = auditor.AuditorWorker(self.conf)
    data = '0' * 1024
    etag = md5()
    timestamp = str(normalize_timestamp(time.time()))
    with self.disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, data)
        etag.update(data)
        etag = etag.hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': timestamp,
            'Content-Length': str(os.fstat(fd).st_size),
            'Original-Content-Length': str(os.fstat(fd).st_size)
        }
        self.disk_file.put(fd, tmppath, metadata)
        pre_quarantines = self.auditor.quarantines

        # remake so it will have metadata
        self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                                  self.logger)

        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines)
        etag = md5()
        etag.update('1' + '0' * 1023)
        etag = etag.hexdigest()
        metadata['ETag'] = etag
        write_metadata(fd, metadata)

        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
def main():
    parser = argparse.ArgumentParser(description='Check Memrise for data and possibly upload to Beeminder.')
    parser.add_argument('-c', '--config-file', metavar='FILENAME',
                        type=argparse.FileType('r'),
                        help='The config file containing your settings. A default one is included in the distribution.')
    parser.add_argument('-d', '--database-file', metavar='FILENAME',
                        type=argparse.FileType('r+'),
                        help='The database file to store persistent data in (e.g. we find newly learned words by monitoring total word count)')
    args = parser.parse_args()

    if not args.config_file:
        if os.path.exists(CONFIG_DEFAULT):
            args.config_file = open(CONFIG_DEFAULT, 'r')
        else:
            exit('Error: No config file specified and no default config in {0}'.format(CONFIG_DEFAULT))
    config = load_config(args.config_file)

    if not args.database_file:
        if not os.path.exists(DATABASE_DEFAULT):
            print "Database file not specified, and default doesn't exist. Creating..."
            args.database_file = open(DATABASE_DEFAULT, 'w+')
            init_database(args.database_file, config)
        else:
            args.database_file = open(DATABASE_DEFAULT, 'r+')
    else:
        print os.fstat(args.database_file.fileno())
        if os.fstat(args.database_file.fileno()).st_size == 0:
            init_database(args.database_file, config)

    b = BeeminderSession(config.get('beeminder', 'username'),
                         config.get('beeminder', 'auth_token'))

    # Here we hardcode total_count monitoring, since the API isn't that great,
    # and what I hoped to do seems impossible (without HTML scraping...)
    do_monitor_totalcount(config.get('memrise', 'username'),
                          args.database_file, b,
                          config.get('beeminder', 'goal_slug'))
    cleanup(args.database_file, args.config_file)
def _set_data_field(fields, args):
    if 'location' not in fields and 'copy_from' not in fields:
        if args.file:
            fields['data'] = open(args.file, 'rb')
        else:
            # distinguish cases where:
            # (1) stdin is not valid (as in cron jobs):
            #     glance ... <&-
            # (2) image data is provided through standard input:
            #     glance ... < /tmp/file or cat /tmp/file | glance ...
            # (3) no image data provided:
            #     glance ...
            try:
                os.fstat(0)
            except OSError:
                # (1) stdin is not valid (closed...)
                fields['data'] = None
                return
            if not sys.stdin.isatty():
                # (2) image data is provided through standard input
                if msvcrt:
                    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
                fields['data'] = sys.stdin
            else:
                # (3) no image data provided
                fields['data'] = None
def __init__(self, fh, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
             resumable=False):
    """Constructor.

    Args:
      fh: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._fh = fh
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    self._size = None
    try:
        if hasattr(self._fh, 'fileno'):
            fileno = self._fh.fileno()

            # Pipes and such show up as 0 length files.
            size = os.fstat(fileno).st_size
            if size:
                self._size = os.fstat(fileno).st_size
    except IOError:
        pass
def _fd_closed(fd):
    try:
        os.fstat(fd)
    except (OSError, IOError), exc:
        if exc.errno == errno.EBADF:
            return True
        raise
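# A quick usage sketch (not from the original source): a descriptor that has
# been closed raises EBADF inside os.fstat(), so _fd_closed() returns True,
# while a live descriptor falls through and returns None (falsy).
import os

r, w = os.pipe()
os.close(w)
assert _fd_closed(w) is True
assert not _fd_closed(r)
os.close(r)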
def __iter__(self):
    if not self.file:
        self._reopen(True)
    try:
        need_reopen = not os.path.samestat(os.fstat(self.file.fileno()),
                                           os.stat(self.fname))
    except:
        need_reopen = True

    if self.file:
        newpos = os.fstat(self.file.fileno()).st_size
        if newpos > self.pos + self.max_size:
            self.pos = newpos
            self.buf = ''
            return
        self.file.seek(self.pos)
        if self.buf:
            self.buf += self.file.readline()
            if self.buf.endswith('\n') or len(self.buf) > self.max_size:
                yield self.buf
                self.buf = ''
        line = None
        for line in self.file:
            if line.endswith('\n'):
                yield line
        if line and not line.endswith('\n'):
            self.buf = line
        self.pos = self.file.tell()

    if need_reopen:
        self._reopen(True)
def pipe_read(self, pipe, minimum_to_read):
    ##  Hackaround since Windows doesn't support select() except for sockets.
    dbg_print('pipe_read: minimum to read is ' + str(minimum_to_read))
    time.sleep(self.delay)

    count = os.fstat(pipe)[stat.ST_SIZE]
    data = ''
    while count > 0:
        data += os.read(pipe, 1)
        count = os.fstat(pipe)[stat.ST_SIZE]

        ##  Be sure to break the read, if asked to do so,
        ##  after we've read in a line termination.
        if minimum_to_read != 0 and data[len(data) - 1] == '\n':
            if len(data) >= minimum_to_read:
                dbg_print('pipe_read: read at least the minimum asked for')
                break
def close(self):
    self.file.close()
    hash = urlsafe_b64encode(self.hash.digest())
    filename = blob_filename(hash)
    dirname = os.path.dirname(filename)
    if os.path.exists(filename):
        origfile = open(filename, 'rb')
        newfile = open(self.file.name, 'rb')
        origlen = os.fstat(origfile.fileno()).st_size
        newlen = os.fstat(newfile.fileno()).st_size
        if origlen != newlen:
            raise Exception('HASH COLLISION! {0} {1}'.format(filename, self.file.name))
        while origlen > 0:
            origdata = origfile.read(min(origlen, 1024))
            newdata = newfile.read(min(origlen, 1024))
            if origdata != newdata:
                raise Exception('HASH COLLISION! {0} {1}'.format(filename, self.file.name))
            origlen -= 1024
        origfile.close()
        newfile.close()
    ensuredirs(dirname, 0700)
    os.rename(self.file.name, filename)
    os.utime(filename, None)
    if self.on_close:
        self.on_close(hash)
    return hash
def split_multimol2(multimol2):
    """
    Splits a multi-mol2 file.

    Parameters
    ----------
    multimol2 : str
        Path to the multi-mol2 file.

    Returns
    ----------
    A generator object for lists for every extracted mol2-file. Lists contain
    the molecule ID and the mol2 file contents.
    e.g., ['ID1234', '@<TRIPOS>MOLECULE...']

    """
    with open(multimol2, 'r') as mol2file:
        line = mol2file.readline()

        while not mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
            if line.startswith("@<TRIPOS>MOLECULE"):
                mol2cont = []
                mol2cont.append(line)
                line = mol2file.readline()
                molecule_id = line.strip()

                while not line.startswith("@<TRIPOS>MOLECULE"):
                    mol2cont.append(line)
                    line = mol2file.readline()
                    if mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
                        mol2cont.append(line)
                        break
                mol2cont[-1] = mol2cont[-1].rstrip()  # removes blank line at file end
                yield [molecule_id, "".join(mol2cont)]
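# Hedged usage sketch for the generator above; 'example.mol2' is a
# hypothetical multi-mol2 file path, not one from the original source.
for molecule_id, mol2_text in split_multimol2('example.mol2'):
    print(molecule_id, len(mol2_text))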
def _putFile(inPath, outUrl, method='PUT', session=None, callback=None):
    if hasattr(inPath, 'read'):
        inFile = inPath
        if hasattr(inPath, 'size'):
            size = inFile.size
        elif hasattr(inPath, 'getvalue'):
            size = len(inPath.getvalue())
        elif hasattr(inPath, 'fileno'):
            size = os.fstat(inPath.fileno()).st_size
        else:
            raise TypeError("Can't determine size of file")
    else:
        inFile = open(inPath)
        size = os.fstat(inFile.fileno()).st_size

    headers = {}
    if session:
        headers['Cookie'] = 'vmware_soap_session=%s; $Path=/' % session

    response = _makeConnection(outUrl, method, headers, inFile,
                               bodyLength=size, callback=callback)

    if response and response.status not in (200, 201):
        raise RuntimeError('%s failed: %d - %s' % (
            method, response.status, response.reason))
    elif not response:
        raise RuntimeError('%s failed' % method)
    response.close()
def truncate_and_unlink(path, increment=256 * 1024**2, delay=0.2):
    """Truncate a file to zero bytes before unlinking

    Truncation is done in ``increment`` bytes with a sleep delay
    of ``delay`` seconds between each truncation step.

    Once the file is zero bytes in size it will be removed/unlinked
    from the filesystem.

    :raises: OSError on error
    """
    LOG.debug("truncate_and_unlink(path=%r, increment=%r, delay=%.2fs)",
              path, format_bytes(increment), delay)
    fd = os.open(path, os.O_RDWR)

    size = os.fstat(fd).st_size
    while size > 0:
        start = time.time()
        length = size - increment
        if length < 0:
            length = 0
        os.ftruncate(fd, length)
        LOG.debug("truncate(%s, length=%s) in %.2fs",
                  path, format_bytes(length), time.time() - start)
        time.sleep(delay)

        size = os.fstat(fd).st_size

    os.unlink(path)
    LOG.debug("unlink(%s)", path)
def execute(target, directory):
    page_size = resource.getpagesize()
    rows = []

    if directory:
        for (path, dirs, files) in os.walk(target):
            for myfile in files:
                f = os.path.join(path, myfile)
                fd = file(f, 'r')
                file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
                if file_size == 0:
                    fd.close()
                    continue
                pages_cached, pages_total = ftools.fincore_ratio(fd.fileno())
                fd.close()
                rows.append([f, file_size, pages_total, pages_cached,
                             (pages_cached * page_size),
                             (float(pages_cached) / float(pages_total)) * 100.0])
    else:
        fd = file(target, 'r')
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        if file_size == 0:
            fd.close()
            return None
        else:
            pages_cached, pages_total = ftools.fincore_ratio(fd.fileno())
            fd.close()
            rows.append([target, file_size, pages_total, pages_cached,
                         (pages_cached * page_size),
                         (float(pages_cached) / float(pages_total)) * 100.0])

    rows = sorted(rows, key=lambda t: t[5], reverse=True)
    return rows
def test_contextmanager(self):
    fn = None
    with Image.open("Images/lena.gif") as im:
        fn = im.fp.fileno()
        os.fstat(fn)
    self.assertRaises(OSError, lambda: os.fstat(fn))
def send_data(v_vars, v_files, boundary, sock=None):
    l = 0
    for (k, v) in v_vars:
        buffer = ''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"\r\n' % k
        buffer += '\r\n'
        buffer += v + '\r\n'
        if sock:
            sock.send(buffer)
        l += len(buffer)

    for (k, v) in v_files:
        fd = v
        if not hasattr(fd, 'read'):
            raise TypeError("file descriptor MUST have read attribute")
        if hasattr(fd, 'fileno'):  # a File
            try:
                name = fd.name.split(os.path.sep)[-1]
                file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            except io.UnsupportedOperation:
                name = fd.name
                file_size = len(fd.getvalue())
                fd.seek(0)
        elif hasattr(fd, 'len'):  # StringIO
            name = k
            file_size = fd.len
            fd.seek(0)  # START
        else:
            raise TypeError(
                "file descriptor might be File or StringIO but MUST have "
                "fileno or len attribute"
            )
        if isinstance(name, unicode):
            name = name.encode('UTF-8')
        buffer = ''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' \
                  % (k, name)
        buffer += 'Content-Type: %s\r\n' % get_content_type(name)
        buffer += 'Content-Length: %s\r\n' % file_size
        buffer += '\r\n'
        l += len(buffer)
        if sock:
            sock.send(buffer)
        if hasattr(fd, 'seek'):
            fd.seek(0)
        # read file only if sock is defined
        if sock:
            while True:
                chunk = fd.read(CHUNK_SIZE)
                if not chunk:
                    break
                sock.send(chunk)
        l += file_size

    buffer = '\r\n'
    buffer += '--%s--\r\n' % boundary
    buffer += '\r\n'
    if sock:
        sock.send(buffer)
    l += len(buffer)
    return l
def fd_id(fd):
    # Returns a tuple which uniquely identifies a file descriptor. In Mac OS,
    # this doesn't work with shared memory handles, which is why we don't
    # support the "file_descriptor" sharing method on that platform.
    stat = os.fstat(fd)
    return (stat.st_ino, stat.st_dev)
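# A small sketch (an assumption, not from the original source): os.dup()
# yields a new descriptor for the same underlying file, so fd_id() returns
# the same (st_ino, st_dev) tuple for both descriptors.
import os

fd = os.open(__file__, os.O_RDONLY)
dup_fd = os.dup(fd)
assert fd_id(fd) == fd_id(dup_fd)
os.close(fd)
os.close(dup_fd)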
# freshen permissions, if possible.
try:
    os.chmod(MSULOGDIR, 01777)
except OSError:
    pass

# find a safe log file to write to for this user
filename = os.path.join(MSULOGDIR, MSULOGFILE % username)
attempt = 0
ours = False

while attempt < 10:
    try:
        fref = os.open(filename, os.O_RDWR | os.O_CREAT | os.O_NOFOLLOW, 0600)
        st = os.fstat(fref)
        ours = stat.S_ISREG(st.st_mode) and st.st_uid == os.getuid()
        os.close(fref)
        if ours:
            break
    except (OSError, IOError):
        pass  # permission denied, symlink, ...

    # avoid creating many separate log files by using one static suffix
    # as the first alternative. if unsuccessful, switch to totally
    # randomly suffixed files.
    if attempt == 0:
        random.seed(hash(username))
    elif attempt == 1:
        random.seed()
    ## All windows closed, clean up
    Editor.deinit_tty()
    Editor.yank_buffer = []
    ## close
    return slot[0].content if (slot[0].fname == "") else slot[0].fname

#ifdef LINUX
if __name__ == "__main__":
    if is_linux:
        import stat
        fd_tty = 0
        if len(sys.argv) > 1:
            name = sys.argv[1:]
            pye(*name, undo=500, device=fd_tty)
        else:
            name = ""
            if not is_micropython:
                mode = os.fstat(0).st_mode
                if stat.S_ISFIFO(mode) or stat.S_ISREG(mode):
                    name = sys.stdin.readlines()
                    os.close(0)  ## close and reopen /dev/tty
                    fd_tty = os.open("/dev/tty", os.O_RDONLY)  ## memorized, if new fd
                    for i, l in enumerate(name):  ## strip and convert
                        name[i], tc = expandtabs(l.rstrip('\r\n\t '))
            pye(name, undo=500, device=fd_tty)
    else:
        print("\nSorry, this OS is not supported (yet)")
#endif
def flen(f):
    return os.fstat(f.fileno()).st_size
def padToBlockSize(path):
    with open(path, 'a') as f:
        size = os.fstat(f.fileno()).st_size
        newSize = utils.round(size, sc.BLOCK_SIZE)
        # ftruncate here extends the file up to the next block boundary,
        # so "padding" describes the operation better than "truncating"
        log.info("Padding file %s to %d bytes", path, newSize)
        os.ftruncate(f.fileno(), newSize)
import os

os.dup2()        # duplicate one file descriptor onto another
os.fdatasync()   # force the file's data to be written to disk
os.fstat()       # return the status of file descriptor fd
os.open()        # open a file, setting the required flags; the mode argument is optional
os.remove()      # delete a file
os.removedirs()  # remove directories recursively
os.rename()      # rename a file or directory
os.renames()     # rename recursively

# parser() method
parseString()
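# A minimal runnable sketch (not part of the listing above) combining these
# calls; "example.tmp" is a hypothetical filename.
import os

fd = os.open("example.tmp", os.O_RDWR | os.O_CREAT)
os.write(fd, b"hello")
os.fdatasync(fd)              # force the data out to disk
print(os.fstat(fd).st_size)   # -> 5
os.close(fd)
os.rename("example.tmp", "example.bak")
os.remove("example.bak")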
def main(opts):
    logfile = open_logfile()
    if not logfile:
        say('No logfile found. Is vserver_launcher running?')
        return 1

    if (opts.last_crash > 0) and not is_alive():
        say('Program is dead. Last logs:')
        print_last_lines(logfile, opts.last_crash)
    elif opts.last > 0:
        say('Last logs:')
        print_last_lines(logfile, opts.last)

    if opts.restart and logfile:
        say('Requesting vserver.py restart')
        os.unlink(LOG_NAME)
        logfile.close()
        for _ in xrange(20):
            logfile = open_logfile()
            if logfile:
                break
            time.sleep(0.25)
        else:
            say('Logfile did not appear. Is vserver_launcher running?')
            return 2

    stop_at = None
    if (opts.wait_exit is not None) and (opts.wait_exit >= 0):
        stop_at = time.time() + opts.wait_exit

    last_msg = time.time()
    say("Live logs")
    log_inode = None

    # Do the tail
    while True:
        if logfile is not None:
            block = logfile.read(1024)
        else:
            # wait for file to appear
            block = ''
            logfile = open_logfile()
            if logfile:
                say('File re-opened')

        if block != '':
            sys.stdout.write(block)
            sys.stdout.flush()
            last_msg = time.time()
            continue

        if not is_alive():
            say('Program has exited. Terminating')
            return 1

        if (stop_at is not None) and (stop_at < time.time()):
            break

        if opts.idle_marker > 0 and ((last_msg + opts.idle_marker) < time.time()):
            say('Program still running, no messages')
            last_msg = time.time()

        # Sleep
        time.sleep(opts.poll_time)

        if logfile is not None:
            # Reset EOF marker if we have the file
            logfile.seek(0, os.SEEK_CUR)

            # Check if file was replaced
            if log_inode is None:
                log_inode = os.fstat(logfile.fileno()).st_ino
            try:
                disk_inode = os.stat(LOG_NAME).st_ino
            except OSError:
                disk_inode = -1
            if log_inode != disk_inode:
                say('Logfile disappeared')
                logfile.close()
                logfile = None
                log_inode = None

    say('Time expired, program still running')
    return 0
def isapipe(fd):
    fd = getattr(fd, 'fileno', lambda: fd)()
    return stat.S_ISFIFO(os.fstat(fd).st_mode)
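# Usage sketch (an assumption, not from the original source): both ends of
# an os.pipe() are FIFOs, so isapipe() is true for them.
import os
import stat

r, w = os.pipe()
assert isapipe(r) and isapipe(w)
os.close(r)
os.close(w)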
def send_head(self):
    """Common code for GET and HEAD commands.

    This sends the response code and MIME headers.

    Return value is either a file object (which has to be copied
    to the outputfile by the caller unless the command was HEAD,
    and must be closed by the caller under all circumstances), or
    None, in which case the caller has nothing further to do.

    """
    path = self.translate_path(self.path)
    f = None
    if os.path.isdir(path):
        parts = urllib.parse.urlsplit(self.path)
        if not parts.path.endswith('/'):
            # redirect browser - doing basically what apache does
            self.send_response(HTTPStatus.MOVED_PERMANENTLY)
            new_parts = (parts[0], parts[1], parts[2] + '/',
                         parts[3], parts[4])
            new_url = urllib.parse.urlunsplit(new_parts)
            self.send_header("Location", new_url)
            self.send_header("Content-Length", "0")
            self.end_headers()
            return None
        for index in "index.html", "index.htm":
            index = os.path.join(path, index)
            if os.path.exists(index):
                path = index
                break
        else:
            return self.list_directory(path)
    ctype = self.guess_type(path)
    # check for trailing "/" which should return 404. See Issue17324
    # The test for this was added in test_httpserver.py
    # However, some OS platforms accept a trailingSlash as a filename
    # See discussion on python-dev and Issue34711 regarding
    # parsing and rejection of filenames with a trailing slash
    if path.endswith("/"):
        self.send_error(HTTPStatus.NOT_FOUND, "File not found")
        return None
    try:
        f = open(path, 'rb')
    except OSError:
        self.send_error(HTTPStatus.NOT_FOUND, "File not found")
        return None

    try:
        fs = os.fstat(f.fileno())
        # Use browser cache if possible
        if ("If-Modified-Since" in self.headers
                and "If-None-Match" not in self.headers):
            # compare If-Modified-Since and time of last file modification
            try:
                ims = email.utils.parsedate_to_datetime(
                    self.headers["If-Modified-Since"])
            except (TypeError, IndexError, OverflowError, ValueError):
                # ignore ill-formed values
                pass
            else:
                if ims.tzinfo is None:
                    # obsolete format with no timezone, cf.
                    # https://tools.ietf.org/html/rfc7231#section-7.1.1.1
                    ims = ims.replace(tzinfo=datetime.timezone.utc)
                if ims.tzinfo is datetime.timezone.utc:
                    # compare to UTC datetime of last modification
                    last_modif = datetime.datetime.fromtimestamp(
                        fs.st_mtime, datetime.timezone.utc)
                    # remove microseconds, like in If-Modified-Since
                    last_modif = last_modif.replace(microsecond=0)
                    if last_modif <= ims:
                        self.send_response(HTTPStatus.NOT_MODIFIED)
                        self.end_headers()
                        f.close()
                        return None

        self.send_response(HTTPStatus.OK)
        self.send_header("Content-type", ctype)
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified",
                         self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
    except:
        f.close()
        raise
def stat(self):
    try:
        return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))
    except OSError as e:
        return SFTPServer.convert_errno(e.errno)
os.unsetenv(varname)        # Unset (delete) the environment variable named varname

################################ File Object Creation ################################
os.fdopen(fd[, mode[, bufsize]])      # Return an open file object connected to the file descriptor fd
os.popen(command[, mode[, bufsize]])  # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
os.tmpfile()                          # Return a new file object opened in update mode (w+b)
os.popen2(cmd[, mode[, bufsize]])     # Deprecated since version 2.6: All of the popen*() functions are obsolete. Use the subprocess module
os.popen3(cmd[, mode[, bufsize]])     # Deprecated since version 2.6: This function is obsolete. Use the subprocess module
os.popen4(cmd[, mode[, bufsize]])     # Deprecated since version 2.6: This function is obsolete. Use the subprocess module

################################ File Descriptor Operations ################################
os.close(fd)                    # Close file descriptor fd.
os.closerange(fd_low, fd_high)  # Close all file descriptors from fd_low (inclusive) to fd_high (exclusive), ignoring errors
os.dup(fd)                      # Return a duplicate of file descriptor fd.
os.dup2(fd, fd2)                # Duplicate file descriptor fd to fd2
os.fstat(fd)                    # Return status for file descriptor fd, like stat().
os.fsync(fd)                    # Force write of file with file descriptor fd to disk
os.isatty(fd)                   # Return True if the file descriptor fd is open and connected to a tty(-like) device, else False.
os.lseek(fd, pos, how)          # Set the current position of file descriptor fd to position pos, modified by how: SEEK_SET or 0
os.open(file, flags[, mode])    # Open the file file and set various flags according to flags
os.pipe()                       # Create a pipe. Return a pair of file descriptors (r, w)
os.read(fd, n)                  # Read at most n bytes from file descriptor fd
os.write(fd, str)               # Write the string str to file descriptor fd

################################ Files and Directories ################################
os.access(path, mode)  # Use the real uid/gid to test for access to path
os.chdir(path)         # Change the current working directory to path.
os.getcwd()            # Return a string representing the current working directory.
os.getcwdu()           # Return a Unicode object representing the current working directory.
os.chmod(path, mode)   # Change the mode of path to the numeric mode
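# A minimal sketch (not part of the reference listing above) tying several of
# these descriptor operations together: create a pipe, push bytes through it,
# and stat the read end.
import os

r, w = os.pipe()             # pair of file descriptors (read end, write end)
os.write(w, b"hello")        # write raw bytes to the write end
print(os.read(r, 5))         # -> b'hello'
print(os.fstat(r).st_mode)   # fstat works on any open descriptor
os.close(r)
os.close(w)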
def maybeParseConfig(self):
    if self.resolv is None:
        # Don't try to parse it, don't set up a call loop
        return

    try:
        resolvConf = file(self.resolv)
    except IOError, e:
        if e.errno == errno.ENOENT:
            # Missing resolv.conf is treated the same as an empty resolv.conf
            self.parseConfig(())
        else:
            raise
    else:
        mtime = os.fstat(resolvConf.fileno()).st_mtime
        if mtime != self._lastResolvTime:
            log.msg('%s changed, reparsing' % (self.resolv,))
            self._lastResolvTime = mtime
            self.parseConfig(resolvConf)

    # Check again in a little while
    from twisted.internet import reactor
    self._parseCall = reactor.callLater(self._resolvReadInterval,
                                        self.maybeParseConfig)

def parseConfig(self, resolvConf):
    servers = []
    for L in resolvConf:
        L = L.strip()
        if L.startswith('nameserver'):
imagesos = os.listdir("/home/neo/Documents/Python/Images/all/")
images = ["/home/neo/Documents/Python/Images/all/" + item for item in imagesos]
images.append("/home/neo/Documents/Python/Images/Yann.jpg")

known_face_encodings = []
known_face_names = []

csvwriter = csv.writer(filewrite, delimiter=",")  # , quotechar='', quoting=csv.QUOTE_MINIMAL)

if load:
    # npdata = np.load('face_recognitiontest.npy', allow_pickle=True)
    try:
        with open("face_recognitiontestdb.csv", "r") as db:
            if os.fstat(db.fileno()).st_size:
                known_face_encodings = np.loadtxt(db, delimiter=",",
                                                  skiprows=1, unpack=True).tolist()
            else:
                print("Error db loading : File is empty")
                load = False
    except (StopIteration, UserWarning):
        load = False

csvreader = csv.reader(fileread, delimiter=",")  # , quotechar='')
csvlist = []
for idlist, item in enumerate(csvreader):
    # known_face_encodings.append(npdata[idlist])
    # print(npdata[idlist])
    print(item[0])
def remapfile(self):
    import mmap
    size = os.fstat(self.fd).st_size
    self.mm = mmap.mmap(self.fd, size, access=self.access)
def copyupdown_internal(wh, sd, upp):
    '''Copy up/down a file or dir.

    wh: 'copyup' or 'copydown'
    sd: (source, destination) paths
    upp: True for copyup, False for copydown
    '''
    if not downtmp:
        bomb("%s when not open" % wh)
    if not sd[0] or not sd[1]:
        bomb("%s paths must be nonempty" % wh)
    dirsp = sd[0][-1] == '/'
    if dirsp != (sd[1][-1] == '/'):
        bomb("%s paths must agree about directoryness"
             " (presence or absence of trailing /)" % wh)

    # if we have a shared directory, we just need to copy it from/to there;
    # in most cases, its testbed end is already in the downtmp dir
    downtmp_host = get_downtmp_host()
    if downtmp_host:
        try:
            if upp:
                copyup_shareddir(sd[0], sd[1], dirsp, downtmp_host)
            else:
                copydown_shareddir(sd[0], sd[1], dirsp, downtmp_host)
            return
        except Timeout:
            raise FailedCmd(['timeout'])
        except (shutil.Error, subprocess.CalledProcessError) as e:
            adtlog.debug('Cannot copy %s to %s through shared dir: %s, '
                         'falling back to tar' % (sd[0], sd[1], str(e)))

    isrc = 0
    idst = 1
    ilocal = 0 + upp
    iremote = 1 - upp

    deststdout = devnull_read
    srcstdin = devnull_read
    remfileq = pipes.quote(sd[iremote])
    if not dirsp:
        rune = 'cat %s%s' % ('><'[upp], remfileq)
        if upp:
            deststdout = open(sd[idst], 'w')
        else:
            srcstdin = open(sd[isrc], 'r')
            status = os.fstat(srcstdin.fileno())
            if status.st_mode & 0o111:
                rune += '; chmod +x -- %s' % (remfileq)
        localcmdl = ['cat']
    else:
        taropts = [None, None]
        taropts[isrc] = '--warning=none -c .'
        taropts[idst] = '--warning=none --preserve-permissions --extract ' \
                        '--no-same-owner'

        rune = 'cd %s; tar %s -f -' % (remfileq, taropts[iremote])
        if upp:
            try:
                os.mkdir(sd[ilocal])
            except (IOError, OSError) as oe:
                if oe.errno != errno.EEXIST:
                    raise
        else:
            rune = ('if ! test -d %s; then mkdir -- %s; fi; ' %
                    (remfileq, remfileq)) + rune

        localcmdl = ['tar', '--directory', sd[ilocal]] + (
            ('%s -f -' % taropts[ilocal]).split())
    downcmdl = auxverb + ['sh', '-ec', rune]

    if upp:
        cmdls = (downcmdl, localcmdl)
    else:
        cmdls = (localcmdl, downcmdl)

    adtlog.debug(str(["cmdls", str(cmdls)]))
    adtlog.debug(str(["srcstdin", str(srcstdin), "deststdout",
                      str(deststdout), "devnull_read", devnull_read]))

    subprocs = [None, None]
    adtlog.debug(" +< %s" % ' '.join(cmdls[0]))
    subprocs[0] = subprocess.Popen(cmdls[0], stdin=srcstdin,
                                   stdout=subprocess.PIPE)
    adtlog.debug(" +> %s" % ' '.join(cmdls[1]))
    subprocs[1] = subprocess.Popen(cmdls[1], stdin=subprocs[0].stdout,
                                   stdout=deststdout)
    subprocs[0].stdout.close()
    try:
        timeout_start(copy_timeout)
        for sdn in [1, 0]:
            adtlog.debug(" +" + "<>"[sdn] + "?")
            status = subprocs[sdn].wait()
            if not (status == 0 or (sdn == 0 and status == -13)):
                timeout_stop()
                bomb("%s %s failed, status %d"
                     % (wh, ['source', 'destination'][sdn], status))
        timeout_stop()
    except Timeout:
        for sdn in [1, 0]:
            subprocs[sdn].kill()
            subprocs[sdn].wait()
        raise FailedCmd(['timeout'])
import itertools
import os
import re
import stat
import sys
import traceback

from voussoirkit import clipext
from voussoirkit import expressionmatch
from voussoirkit import pathclass
from voussoirkit import safeprint
from voussoirkit import spinal

# Thanks georg
# http://stackoverflow.com/a/13443424
STDIN_MODE = os.fstat(sys.stdin.fileno()).st_mode
if stat.S_ISFIFO(STDIN_MODE):
    STDIN_MODE = 'pipe'
else:
    STDIN_MODE = 'terminal'

def all_terms_match(search_text, terms, match_function):
    matches = (
        (not terms['yes_all'] or all(match_function(search_text, term) for term in terms['yes_all'])) and
        (not terms['yes_any'] or any(match_function(search_text, term) for term in terms['yes_any'])) and
        (not terms['not_all'] or not all(match_function(search_text, term) for term in terms['not_all'])) and
        (not terms['not_any'] or not any(match_function(search_text, term) for term in terms['not_any']))
    )
    return matches

def search(
def _handle_get_static(self, path_match, data):
    """ Returns a static file. """
    req_file = util.sanitize_path(path_match.group('file'))

    # Strip md5 hash out of frontend filename
    if re.match(r'^frontend-[A-Za-z0-9]{32}\.html$', req_file):
        req_file = "frontend.html"

    path = os.path.join(os.path.dirname(__file__), 'www_static', req_file)

    inp = None
    try:
        inp = open(path, 'rb')
        do_gzip = 'gzip' in self.headers.get('accept-encoding', '')

        self.send_response(HTTP_OK)

        ctype = self.guess_type(path)
        self.send_header("Content-Type", ctype)

        # Add cache if not development
        if not self.server.development:
            # 1 year in seconds
            cache_time = 365 * 86400
            self.send_header("Cache-Control",
                             "public, max-age={}".format(cache_time))
            self.send_header("Expires",
                             self.date_time_string(time.time() + cache_time))

        if do_gzip:
            gzip_data = gzip.compress(inp.read())
            self.send_header("Content-Encoding", "gzip")
            self.send_header("Vary", "Accept-Encoding")
            self.send_header("Content-Length", str(len(gzip_data)))
        else:
            fs = os.fstat(inp.fileno())
            self.send_header("Content-Length", str(fs[6]))

        self.end_headers()

        if self.command == 'HEAD':
            return
        elif do_gzip:
            self.wfile.write(gzip_data)
        else:
            self.copyfile(inp, self.wfile)
    except IOError:
        self.send_response(HTTP_NOT_FOUND)
        self.end_headers()
    finally:
        if inp:
            inp.close()
def sizesha1(f):
    size = os.fstat(f.fileno())[6]
    f.seek(0)
    sha1sum = apt_pkg.sha1sum(f)
    return (sha1sum, size)
def isEndOfFile(fic):
    return fic.tell() == os.fstat(fic.fileno()).st_size
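# Usage sketch (assumption: run against any non-empty file, here the script
# itself): reading everything advances tell() to the fstat size.
with open(__file__, 'rb') as fh:
    assert not isEndOfFile(fh)
    fh.read()
    assert isEndOfFile(fh)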
def last_update(self):
    return os.fstat(self._tmp.fileno()).st_ctime
def in_place(
    self,
    mode='r',
    buffering=-1,
    encoding=None,
    errors=None,
    newline=None,
    backup_extension=None,
):
    """
    A context in which a file may be re-written in-place with
    new content.

    Yields a tuple of :samp:`({readable}, {writable})` file
    objects, where `writable` replaces `readable`.

    If an exception occurs, the old file is restored, removing the
    written data.

    Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
    read-only-modes are allowed. A :exc:`ValueError` is raised
    on invalid modes.

    For example, to add line numbers to a file::

        p = Path(filename)
        assert p.isfile()
        with p.in_place() as (reader, writer):
            for number, line in enumerate(reader, 1):
                writer.write('{0:3}: '.format(number))
                writer.write(line)

    Thereafter, the file at `filename` will have line numbers in it.
    """
    if set(mode).intersection('wa+'):
        raise ValueError('Only read-only file modes can be used')

    # move existing file to backup, create new file with same permissions
    # borrowed extensively from the fileinput module
    backup_fn = self + (backup_extension or os.extsep + 'bak')
    backup_fn.remove_p()
    self.rename(backup_fn)
    readable = io.open(
        backup_fn,
        mode,
        buffering=buffering,
        encoding=encoding,
        errors=errors,
        newline=newline,
    )
    try:
        perm = os.fstat(readable.fileno()).st_mode
    except OSError:
        writable = self.open(
            'w' + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
    else:
        os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
        os_mode |= getattr(os, 'O_BINARY', 0)
        fd = os.open(self, os_mode, perm)
        writable = io.open(
            fd,
            "w" + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
        with contextlib.suppress(OSError, AttributeError):
            self.chmod(perm)
    try:
        yield readable, writable
    except Exception:
        # move backup back
        readable.close()
        writable.close()
        self.remove_p()
        backup_fn.rename(self)
        raise
    else:
        readable.close()
        writable.close()
    finally:
        backup_fn.remove_p()
def fsize(self):
    return os.fstat(self.f.fileno())[stat.ST_SIZE]
def upload_to_s3(aws_access_key_id, aws_secret_access_key, fname, bucket, key,
                 callback=None, md5=None, reduced_redundancy=False,
                 content_type=None, host='s3.eu-central-1.amazonaws.com'):
    """
    XXX copied from somewhere on stackoverflow. Hope to find it again.

    Uploads the given file to the AWS S3 bucket and key specified.

    callback is a function of the form:

    def callback(complete, total)

    The callback should accept two integer parameters, the first
    representing the number of bytes that have been successfully
    transmitted to S3 and the second representing the size of the
    to be transmitted object.

    Returns boolean indicating success/failure of upload.
    """
    switch_validation = False
    if host is not None:
        if 'eu-central' in host:
            switch_validation = True
            os.environ['S3_USE_SIGV4'] = 'True'
    com = boto.connect_s3(aws_access_key_id, aws_secret_access_key, host=host)
    bucket = com.get_bucket(bucket, validate=True)
    s3_key = Key(bucket)
    s3_key.key = key
    if content_type:
        s3_key.set_metadata('Content-Type', content_type)

    with open(fname) as fid:
        try:
            # was os.fstat(fname.fileno()): fname is a path string, the
            # open file object fid is what carries the descriptor
            size = os.fstat(fid.fileno()).st_size
        except:
            # Not all file objects implement fileno(),
            # so we fall back on this
            fid.seek(0, os.SEEK_END)
            size = fid.tell()
        sent = s3_key.set_contents_from_file(
            fid, cb=callback, md5=md5,
            reduced_redundancy=reduced_redundancy, rewind=True)
        # Rewind for later use
        fid.seek(0)

    if switch_validation:
        del os.environ['S3_USE_SIGV4']

    if sent == size:
        return True
    return False
        if check(arr):
            return (i + 1)
    return n

def check(arr):
    for i, x in enumerate(arr):
        if (i + 1) != x:
            return False
    return True

def f(arr, i):
    if arr[i] > arr[i + 1]:
        tmp = arr[i]
        arr[i] = arr[i + 1]
        arr[i + 1] = tmp

import os
import io
# import time
# a = time.time()

if __name__ == "__main__":
    # fast input: slurp all of stdin in one os.read() and hand out lines
    # from an in-memory BytesIO
    input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline
    T = int(input().decode().strip())
    for t in range(T):
        n = int(input().decode().strip())
        arr = [int(x) for x in input().decode().strip().split(" ")]
        res = solve_dumb(arr, n)
        # check(res)
        print(res)
def update_body_from_data(self, data, skip_auto_headers):
    if not data:
        return

    if isinstance(data, str):
        data = data.encode(self.encoding)

    if isinstance(data, (bytes, bytearray)):
        self.body = data
        if (hdrs.CONTENT_TYPE not in self.headers and
                hdrs.CONTENT_TYPE not in skip_auto_headers):
            self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
        if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
            self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))

    elif isinstance(data, (asyncio.StreamReader, streams.DataQueue)):
        self.body = data

    elif asyncio.iscoroutine(data):
        self.body = data
        if (hdrs.CONTENT_LENGTH not in self.headers and
                self.chunked is None):
            self.chunked = True

    elif isinstance(data, io.IOBase):
        assert not isinstance(data, io.StringIO), \
            'attempt to send text data instead of binary'
        self.body = data
        if not self.chunked and isinstance(data, io.BytesIO):
            # Not chunking if content-length can be determined
            size = len(data.getbuffer())
            self.headers[hdrs.CONTENT_LENGTH] = str(size)
            self.chunked = False
        elif not self.chunked and isinstance(data, io.BufferedReader):
            # Not chunking if content-length can be determined
            try:
                size = os.fstat(data.fileno()).st_size - data.tell()
                self.headers[hdrs.CONTENT_LENGTH] = str(size)
                self.chunked = False
            except OSError:
                # data.fileno() is not supported, e.g.
                # io.BufferedReader(io.BytesIO(b'data'))
                self.chunked = True
        else:
            self.chunked = True

        if hasattr(data, 'mode'):
            if data.mode == 'r':
                raise ValueError('file {!r} should be open in binary mode'
                                 ''.format(data))
        if (hdrs.CONTENT_TYPE not in self.headers and
                hdrs.CONTENT_TYPE not in skip_auto_headers and
                hasattr(data, 'name')):
            mime = mimetypes.guess_type(data.name)[0]
            mime = 'application/octet-stream' if mime is None else mime
            self.headers[hdrs.CONTENT_TYPE] = mime

    elif isinstance(data, MultipartWriter):
        self.body = data.serialize()
        self.headers.update(data.headers)
        self.chunked = self.chunked or 8192

    else:
        if not isinstance(data, helpers.FormData):
            data = helpers.FormData(data)

        self.body = data(self.encoding)
        if (hdrs.CONTENT_TYPE not in self.headers and
                hdrs.CONTENT_TYPE not in skip_auto_headers):
            self.headers[hdrs.CONTENT_TYPE] = data.content_type

        if data.is_multipart:
            self.chunked = self.chunked or 8192
        else:
            if (hdrs.CONTENT_LENGTH not in self.headers and
                    not self.chunked):
                self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def _send_request(self, method, path, headers, subres=None,
                  query_string=None, body=None):
    '''Add authentication and send request

    Returns the response object.
    '''
    if not isinstance(headers, CaseInsensitiveDict):
        headers = CaseInsensitiveDict(headers)

    if not self.hostname.startswith(self.bucket_name):
        path = '/%s%s' % (self.bucket_name, path)
    headers['host'] = self.hostname

    self._authorize_request(method, path, headers, subres, query_string)

    path = urllib.parse.quote(path)
    if query_string:
        s = urllib.parse.urlencode(query_string, doseq=True)
        if subres:
            path += '?%s&%s' % (subres, s)
        else:
            path += '?%s' % s
    elif subres:
        path += '?%s' % subres

    # We can probably remove the assertions at some point and
    # call self.conn.read_response() directly
    def read_response():
        resp = self.conn.read_response()
        assert resp.method == method
        assert resp.path == path
        return resp

    use_expect_100c = not self.options.get('disable-expect100', False)
    try:
        log.debug('sending %s %s', method, path)
        if body is None or isinstance(body, (bytes, bytearray, memoryview)):
            self.conn.send_request(method, path, body=body, headers=headers)
        else:
            body_len = os.fstat(body.fileno()).st_size
            self.conn.send_request(method, path, expect100=use_expect_100c,
                                   headers=headers,
                                   body=BodyFollowing(body_len))

            if use_expect_100c:
                resp = read_response()
                if resp.status != 100:  # Error
                    return resp

            try:
                copyfileobj(body, self.conn, BUFSIZE)
            except ConnectionClosed:
                # Server closed connection while we were writing body data -
                # but we may still be able to read an error response
                try:
                    resp = read_response()
                except ConnectionClosed:  # No server response available
                    pass
                else:
                    if resp.status >= 400:  # Got error response
                        return resp
                    log.warning('Server broke connection during upload, but '
                                'signaled %d %s', resp.status, resp.reason)

                # Re-raise first ConnectionClosed exception
                raise

        return read_response()

    except Exception as exc:
        if is_temp_network_error(exc) or isinstance(exc, ssl.SSLError):
            # We probably can't use the connection anymore
            self.conn.disconnect()
        raise
def _do_request(self, connection, method, path, headers=None, body=None,
                download_body=True):
    '''Send request, read and return response object'''

    log.debug('started with %s %s', method, path)

    if headers is None:
        headers = CaseInsensitiveDict()

    if self.authorization_token is None:
        self._authorize_account()

    if 'Authorization' not in headers:
        headers['Authorization'] = self.authorization_token

    if self.test_mode_expire_some_tokens:
        headers['X-Bz-Test-Mode'] = 'expire_some_account_authorization_tokens'

    if self.test_mode_force_cap_exceeded:
        headers['X-Bz-Test-Mode'] = 'force_cap_exceeded'

    log.debug('REQUEST: %s %s %s', connection.hostname, method, path)

    if body is None or isinstance(body, (bytes, bytearray, memoryview)):
        connection.send_request(method, path, headers=headers, body=body)
    else:
        body_length = os.fstat(body.fileno()).st_size
        connection.send_request(method, path, headers=headers,
                                body=BodyFollowing(body_length))
        copyfileobj(body, connection, BUFSIZE)

    response = connection.read_response()

    if download_body is True or response.status != 200:
        # Backblaze always returns a json with error information in body
        response_body = connection.readall()
    else:
        response_body = None

    content_length = response.headers.get('Content-Length', '0')
    log.debug('RESPONSE: %s %s %s %s', response.method, response.status,
              response.reason, content_length)

    if (
        response.status == 404 or  # File not found
        (response.status != 200 and method == 'HEAD')
    ):
        # HEAD responses do not have a body -> we have to raise a HTTPError
        # with the code
        raise HTTPError(response.status, response.reason, response.headers)

    if response.status != 200:
        json_error_response = json.loads(
            response_body.decode('utf-8')) if response_body else None
        code = json_error_response['code'] if json_error_response else None
        message = json_error_response['message'] \
            if json_error_response else response.reason
        b2_error = B2Error(json_error_response['status'], code, message,
                           response.headers)
        raise b2_error

    return response, response_body
def working_tree_time_ns(self) -> int:
    self._mtime_probe.seek(0)
    self._mtime_probe.write(b'0')  # updates mtime
    return os.fstat(self._mtime_probe.fileno()).st_mtime_ns
def _fill(self):
    # read everything currently available on the descriptor (at least BUFSIZE)
    s = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
    # the tuple evaluates left to right: remember the current position,
    # seek to the end, append the new bytes, then seek back to where we were
    self.seek((self.tell(), self.seek(0, 2), super(FastIO, self).write(s))[0])
    return s