def _save_cookies(file_path, dict_object):
    """Cache cookies dictionary to file. Filters out everything but JSESSIONID.

    Positional arguments:
    file_path -- string representing the file path to where cookie data is to be stored on disk.
    dict_object -- dict containing the current JIRA session via JIRA()._session.cookies.get_dict().
    """
    # Encode dict_object.
    sanitized = dict((k, v) for k, v in dict_object.items()
                     if k == 'JSESSIONID' and str(v).isalnum())
    json_string = json.dumps(sanitized)
    encoded = base64.b64encode(json_string.encode('ascii'))
    # Remove existing files.
    try:
        os.remove(file_path)
    except OSError:
        pass
    # Write file.
    old_mask = os.umask(0o077)
    with open(file_path, 'wb') as f:
        f.seek(0)
        f.truncate()
        f.write(encoded)
        f.flush()
        if hasattr(os, 'fdatasync'):
            os.fdatasync(f.fileno())  # Linux only.
    os.umask(old_mask)
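# Companion sketch (an assumption, not from the original module): reading the
# cache written by _save_cookies() just reverses the base64/JSON encoding.
# The name _load_cookies and the error handling are illustrative.
def _load_cookies(file_path):
    """Return the cached cookie dict, or an empty dict if unreadable."""
    try:
        with open(file_path, 'rb') as f:
            decoded = base64.b64decode(f.read())
        return json.loads(decoded.decode('ascii'))
    except (OSError, IOError, ValueError):
        return {}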
def do_fdatasync(fd):
    try:
        os.fdatasync(fd)
    except AttributeError:
        do_fsync(fd)
    except OSError as err:
        raise GlusterFileSystemOSError(
            err.errno, '%s, os.fdatasync("%s")' % (err.strerror, fd))
def fsync(self, isfsyncfile):
    log.debug("file %s isfsyncfile %d" % (self.path, isfsyncfile))
    self._fflush()
    if isfsyncfile and hasattr(os, 'fdatasync'):
        os.fdatasync(self.fd)
    else:
        os.fsync(self.fd)
def fsync(self, path, fdatasync, fh=None):
    f = os.fdopen(os.open("." + path, os.O_RDONLY))
    f.flush()
    if fdatasync and hasattr(os, "fdatasync"):
        os.fdatasync(f.fileno())
    else:
        os.fsync(f.fileno())
def _sync(self, flags):
    with self.lock:
        if self.file:
            if flags & 1 and hasattr(os, 'fdatasync'):
                os.fdatasync(self.file.fileno())
            else:
                os.fsync(self.file.fileno())
def get_sh_data(script_file, pretty_print=False):
    rfd, wfd = os.pipe()
    if rfd != 3:
        os.dup2(rfd, 3)
        os.close(rfd)
        rfd = 3
    if wfd != 4:
        os.dup2(wfd, 4)
        os.close(wfd)
        wfd = 4
    tfd, tfn = tempfile.mkstemp()
    try:
        os.write(tfd, script)
        os.write(tfd, "\n. %s\n\n" % script_file)
        os.fdatasync(tfd)
        os.close(tfd)
        p = subprocess.Popen(['/bin/bash', tfn], executable='/bin/bash')
        os.close(wfd)
        result = rdata(rfd)
        if pretty_print:
            return json.dumps(result, indent=4)
        else:
            return json.dumps(result)
    finally:
        os.close(rfd)
        os.unlink(tfn)
def fsync(self, isfsyncfile):
    self.__fail_dir_ops()
    self._fflush()
    if isfsyncfile and hasattr(os, 'fdatasync'):
        os.fdatasync(self.fd)
    else:
        os.fsync(self.fd)
def addextra(tempdir, builddir, name, extralist):
    # If there is no extraconf there is no reason to do any other work
    if extralist is None:
        return
    myf = "{}/conf/{}".format(builddir, name)
    myf_orig = "{}/{}.orig".format(tempdir, name)
    tmpfile = "{}/{}.orig.tmp".format(tempdir, name)
    # copy isn't atomic, so make sure that orig is created atomically so that
    # file.orig is always correct even if file gets hosed. That means if a
    # user ever sees file.orig, they can be assured that it is the same as
    # the original file with no corruption.
    shutil.copyfile(myf, tmpfile)
    with open(tmpfile, "r") as f:
        fd = f.fileno()
        os.fdatasync(fd)
    # Remember to first sync the file AND the directory to make sure data
    # is written out
    fd = os.open(os.path.dirname(tmpfile), os.O_RDONLY)
    os.fsync(fd)
    os.close(fd)
    # Rename should be atomic with respect to disk; yes, all of this assumes
    # Linux and possibly non-network filesystems.
    os.rename(tmpfile, myf_orig)
    with open(myf, "a") as f:
        if extralist:
            for conf in extralist:
                with open(conf) as f2:
                    content = f2.readlines()
                for l in content:
                    f.write("%s\n" % format(l.strip()))
def LogRegistrationCodeMap(self, registration_code_map,
                           log_filename='registration_code_log.csv',
                           board=None, hwid=None):
    """Logs that a particular registration code has been used.

    Args:
      registration_code_map: A dict containing 'user' and 'group' reg codes.
      log_filename: File to append log to.
      board: Board name. If None, will try to derive it from hwid.
      hwid: HWID object, could be None.

    Raises:
      ValueError if the registration code is invalid.
      ValueError if both board and hwid are None.
    """
    for key in ('user', 'group'):
        CheckRegistrationCode(registration_code_map[key])
    if not board:
        if hwid:
            board = hwid.partition(' ')[0]
        else:
            raise ValueError('Both board and hwid are missing.')
    if not hwid:
        hwid = ''
    # See http://goto/nkjyr for file format.
    with open(os.path.join(self.data_dir, log_filename), "ab") as f:
        csv.writer(f, dialect=NewlineTerminatedCSVDialect).writerow([
            board,
            registration_code_map['user'],
            registration_code_map['group'],
            time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
            hwid])
        os.fdatasync(f.fileno())
def fsync(self, isfsyncfile):
    syslog.syslog('Function fsync(%s) called' % isfsyncfile)
    self._fflush()
    if isfsyncfile and hasattr(os, 'fdatasync'):
        os.fdatasync(self.fd)
    else:
        os.fsync(self.fd)
def fdatasync(space, w_fd):
    """Force write of file with filedescriptor to disk.
    Does not force update of metadata."""
    fd = space.c_filedescriptor_w(w_fd)
    try:
        os.fdatasync(fd)
    except OSError, e:
        raise wrap_oserror(space, e)
def fsync(self, isfsyncfile):
    logger.debug('DejumbleFile.fsync(%s)' % isfsyncfile)
    if hasattr(self.file, 'fileno'):
        self._fflush()
        if isfsyncfile and hasattr(os, 'fdatasync'):
            os.fdatasync(self.file.fileno())
        else:
            os.fsync(self.file.fileno())
def fsync(self, isfsyncfile):
    if 'w' in self.file.mode or 'a' in self.file.mode:
        self.file.flush()
        if isfsyncfile and hasattr(os, 'fdatasync'):
            os.fdatasync(self.file.fileno())
        else:
            os.fsync(self.file.fileno())
    return 0
def commit(self):
    self.fp.flush()
    if hasattr(os, 'fdatasync'):
        os.fdatasync(self.fp.fileno())
    else:
        os.fsync(self.fp.fileno())
    self.fp.close()
    rename(self.tmpname, self.filename)
def _save_locked(self):
    tmp_file = "%s_temp" % self.filename_
    fd = open(tmp_file, "w")
    try:
        fd.write("RESTARTS = %s" % repr(self.restarts_))
        fd.flush()  # flush Python buffers before fdatasync
        os.fdatasync(fd.fileno())  # put it on the disk
    finally:
        fd.close()
    os.rename(tmp_file, self.filename_)
def daemon_log(msg):
    formatted_msg = "%s %s\n" % (timestamp(), msg)
    if opt_log_fd != None:
        os.write(opt_log_fd, formatted_msg)
        if not on_windows:
            os.fdatasync(opt_log_fd)
    if opt_debug:
        sys.stdout.write(formatted_msg)
        sys.stdout.flush()
def fsync(self, isfsyncfile):
    self.lock.acquire()
    try:
        if isfsyncfile and hasattr(os, 'fdatasync'):
            os.fdatasync(self.fd)
        else:
            os.fsync(self.fd)
    finally:
        self.lock.release()
def fsync(self, fdatasync, fh=None):
    """ """
    print "NyaFile.fsync() "  # fdatasync = ", fdatasync, " fh = ", fh
    self._fflush()
    if fdatasync and hasattr(os, 'fdatasync'):
        os.fdatasync(self.fd)
    else:
        os.fsync(self.fd)
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is None:
        self.outfd.flush()
        if self.sync:
            os.fdatasync(self.outfd.fileno())
        os.fchmod(self.outfd.fileno(), self.mode)
        os.rename(self.outfd.name, self.fname)
        self.outfd.delete = False
    self.outfd.close()
    return False
def test_fdatasync(self): os = self.posix f = open(self.path2, "w") try: fd = f.fileno() os.fdatasync(fd) finally: f.close() raises(OSError, os.fdatasync, fd) raises(ValueError, os.fdatasync, -1)
def fsync(self, isfsyncfile):
    verbose("file fsync isfsyncfile %s:" % isfsyncfile)
    if self.trigger_activity():
        self._fflush()
        if isfsyncfile and hasattr(os, 'fdatasync'):
            os.fdatasync(self.fd)
        else:
            os.fsync(self.fd)
    else:
        return -errno.EIO
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is None:
        self.outfd.flush()
        if self.sync:
            os.fdatasync(self.fd)
        os.fchmod(self.fd, self.osmode)
        os.rename(self.abspath, self.fname)
    else:
        os.unlink(self.abspath)
    self.outfd.close()
    return False
def fsync(self, file_descriptor, isfsyncfile):
    logging.debug('FSYNC')
    try:
        if isfsyncfile and hasattr(os, 'fdatasync'):
            os.fdatasync(file_descriptor)
        else:
            os.fsync(file_descriptor)
    except:
        return -errno.EACCES
def daemon_log(msg): if opt_debug_limit >= 0: if len(msg) > opt_debug_limit: msg = msg[:opt_debug_limit] + "... [%s B]" % (len(msg),) formatted_msg = "%s %s\n" % (timestamp(), msg) if opt_log_fd != None: os.write(opt_log_fd, formatted_msg) if has_os_fdatasync: os.fdatasync(opt_log_fd) if opt_debug and opt_debug_limit != 0: sys.stdout.write(formatted_msg) sys.stdout.flush()
def daemon_log(msg):
    formatted_msg = "%s %s\n" % (timestamp(), msg)
    if opt_log_fd != None:
        os.write(opt_log_fd, formatted_msg)
        if not on_windows:
            os.fdatasync(opt_log_fd)
    if opt_debug and opt_debug_stdout_limit != 0:
        if opt_debug_stdout_limit > 0 and len(formatted_msg) > opt_debug_stdout_limit:
            sys.stdout.write(formatted_msg[:opt_debug_stdout_limit-3] + "...\n")
        else:
            sys.stdout.write(formatted_msg)
        sys.stdout.flush()
def fsync(self, path, datasync, fh):
    global VERBOSE
    path = fix_path(path, self.log)
    err = 0
    try:
        if bool(datasync):
            os.fdatasync(fh)
        else:
            os.fsync(fh)
    except OSError, e:
        err = e.errno
        self.log.exception(path)
def fsync(self, path, datasync, fh=None): """ Synchronises an open file. datasync: If True, only flush user data, not metadata. """ with ewrap("fsync"): logging.debug("fsync: %s (datasync %s, fh %s)" % (path, datasync, fh)) self._fflush(fh) if datasync and hasattr(os, 'fdatasync'): os.fdatasync(fh.fileno()) else: os.fsync(fh.fileno())
def daemon_log(msg):
    formatted_msg = "%s %s\n" % (timestamp(), msg)
    if opt_log_fd != None:
        os.write(opt_log_fd, formatted_msg)
        if has_os_fdatasync:
            os.fdatasync(opt_log_fd)
    if opt_debug and opt_debug_stdout_limit != 0:
        if (opt_debug_stdout_limit > 0 and
                len(formatted_msg) > opt_debug_stdout_limit):
            sys.stdout.write(formatted_msg[:opt_debug_stdout_limit - 3] + "...\n")
        else:
            sys.stdout.write(formatted_msg)
        sys.stdout.flush()
def gzip_file(params, file):
    log.logger.debug('started,file=%s' % file)
    try:
        with open(file, 'rt') as f_in:
            with gzip.open(file + '.gz', 'wt',
                           compresslevel=params.gzip_compresslevel) as f_out:
                shutil.copyfileobj(f_in, f_out)
                f_out.flush()
                os.fdatasync(f_out.fileno())
        os.remove(file)
    except:
        log.logger.error('\n' + traceback.format_exc())
        exit(1)
def test_fdatasync(self):
    os = self.posix
    f = open(self.path2)
    try:
        fd = f.fileno()
        os.fdatasync(fd)
    finally:
        f.close()
    try:
        os.fdatasync(fd)
    except OSError:
        pass
    else:
        raise AssertionError("os.fdatasync didn't raise")
def test_fdatasync(self): os = self.posix f = open(self.path2, "w") try: fd = f.fileno() os.fdatasync(fd) finally: f.close() try: # May not raise anything with a buggy libc (or eatmydata) os.fdatasync(fd) except OSError: pass raises(ValueError, os.fdatasync, -1)
def Write(self, filename, id=0):
    write_time = int(time.time())
    self.SetValue(TIME, [write_time, id, time.ctime(write_time)])
    basename = os.path.basename(filename)
    dirname = os.path.dirname(filename)
    tmpfilename = "%s/.%s" % (dirname, basename)
    fd = open(tmpfilename, 'w')
    fd.write('AUTO_RUNNER_REQUEST = %s' % repr(self.datadict))
    fd.flush()  # flush Python buffers before fdatasync
    os.fdatasync(fd.fileno())  # sync on disk
    fd.close()
    (rc, out) = commands.getstatusoutput('mv -f %s %s' %
                                         (tmpfilename, filename))
    if rc:
        raise IOError, out
def sync():
    """
    synchronize the CSV file with the remote database

    this uses varied user-agents, and on error it sleeps for a random
    interval (up to, but not including, 1 second)
    """
    request_factory = lambda u: urllib2.Request(
        u, headers={"User-Agent": random.choice(USER_AGENTS)})
    url_root = "http://www.payphone-project.com/numbers/usa/"
    with open("uspayphones.csv", "wb") as fp:
        writer = csv.writer(fp)
        writer.writerow(
            ["address", "name", "number", "state_abbreviation", "town"])
        for abbreviation in STATE_ABBREVIATIONS:
            state_url = os.path.join(url_root, abbreviation)
            print "[*] Operating in state", abbreviation, "from", state_url
            state_request = request_factory(state_url)
            for town in extract_towns(urllib2.urlopen(state_request).read()):
                town_url = os.path.join(state_url, town)
                print "\t[*] Scanning", town, "from", town_url
                try:
                    while 1:
                        try:
                            town_request = request_factory(town_url)
                            for payphone in extract_payphones(
                                    urllib2.urlopen(town_request).read()):
                                print "\t\t[*] Found", payphone["name"],
                                print "(%s)" % payphone["number"],
                                print "at", payphone["address"], "in",
                                print town.replace('_', ' ')
                                writer.writerow([
                                    payphone["address"],
                                    payphone["name"],
                                    payphone["number"],
                                    abbreviation,
                                    town.replace('_', ' ')
                                ])
                                fp.flush()  # flush Python buffers before fdatasync
                                os.fdatasync(fp.fileno())
                            break
                        except (urllib2.HTTPError, urllib2.URLError) as e:
                            print >> sys.stderr, "\x1b[31m[!] Failed" \
                                " to GET %s:" % town_url, e, "\x1b[39m"
                            time.sleep(random.random())
                except KeyboardInterrupt:
                    print "[*] Quitting GET attempts"
                    return  # break out of everything
def gzip_d(gzfile):
    log.logger.debug('started,infile=%s' % gzfile)
    try:
        outfpath = gzfile[:-3]
        with open(outfpath, 'w') as outfile:
            with gzip.open(gzfile) as infile:
                for line in infile:
                    line = line.decode()
                    outfile.write(line)
            outfile.flush()
            os.fdatasync(outfile.fileno())
    except:
        log.logger.error('\n' + traceback.format_exc())
        exit(1)
def daemon_log(msg):
    if opt_debug_limit >= 0:
        if len(msg) > opt_debug_limit:
            msg = (msg[:opt_debug_limit/2] +
                   ("...[%s B, log CRC %s]..." % (len(msg), messages.crc(msg))) +
                   msg[-opt_debug_limit/2:])
    formatted_msg = "%s %s\n" % (timestamp(), msg)
    if opt_log_fd != None:
        os.write(opt_log_fd, formatted_msg)
        if has_os_fdatasync:
            os.fdatasync(opt_log_fd)
    if opt_debug and opt_debug_limit != 0:
        sys.stdout.write(formatted_msg)
        sys.stdout.flush()
def handle(self, *args, **kwargs):
    result = FuseOpResult()
    file_desc = kwargs["file_descriptor"]
    try:
        if kwargs["datasync"]:
            os.fdatasync(file_desc)
        else:
            os.fsync(file_desc)
    except Exception as e:
        logging.error("Error during fsync request: {}".format(e))
        result.errno = errno.EIO
        result.data = str(e)
    return result
def write(fd, serial_number, pyobject):
    os.lseek(fd, 0, os.SEEK_SET)  # os.lseek(fd, pos, how): rewind to the start
    data_pickle = pickle.dumps(pyobject, pickle.HIGHEST_PROTOCOL)
    data_serial = struct.pack('>Q', serial_number)
    data_length = struct.pack('>Q', len(data_pickle))
    m = hashlib.md5()
    m.update(data_serial)
    m.update(data_length)
    m.update(data_pickle)
    os.write(fd, ''.join([m.digest(), data_serial, data_length, data_pickle]))
    os.fdatasync(fd)
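# Hypothetical counterpart to write() above (an assumption, not in the
# source): read the record back and verify the stored MD5 over the serial,
# length and payload fields before unpickling. The name read() and the
# on-disk layout assumptions simply mirror the writer.
def read(fd):
    os.lseek(fd, 0, os.SEEK_SET)
    digest = os.read(fd, 16)       # an MD5 digest is 16 bytes
    data_serial = os.read(fd, 8)
    data_length = os.read(fd, 8)
    (length,) = struct.unpack('>Q', data_length)
    data_pickle = os.read(fd, length)
    m = hashlib.md5()
    m.update(data_serial)
    m.update(data_length)
    m.update(data_pickle)
    if m.digest() != digest:
        raise ValueError('record checksum mismatch')
    (serial_number,) = struct.unpack('>Q', data_serial)
    return serial_number, pickle.loads(data_pickle)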
def write_profile(self, force=False):
    """Write out profile data to disk.

    Write out this ``OsProfile``'s data to a file in Boom format to
    the paths specified by the current configuration.

    Currently the ``os_id``, ``short_name`` and ``version_id`` keys
    are used to construct the file name.

    If the value of ``force`` is ``False`` and the ``OsProfile`` is
    not currently marked as dirty (either new, or modified since the
    last load operation) the write will be skipped.

    :param force: Force this profile to be written to disk even if
                  the entry is unmodified.
    :raises: ``OsError`` if the temporary entry file cannot be
             renamed, or if setting file permissions on the new
             entry file fails.
    """
    if not force and not self._unwritten:
        return
    profile_path = self._profile_path()
    _log_debug("Writing OsProfile(name='%s', os_id='%s') to '%s'" %
               (self.name, self.disp_os_id, basename(profile_path)))
    (tmp_fd, tmp_path) = mkstemp(prefix="boom", dir=boom_profiles_path())
    with fdopen(tmp_fd, "w") as f:
        for key in [k for k in PROFILE_KEYS if k in self._profile_data]:
            if self._comments and key in self._comments:
                f.write(self._comments[key].rstrip() + '\n')
            f.write('%s="%s"\n' % (key, self._profile_data[key]))
        f.flush()
        fdatasync(f.fileno())
    try:
        rename(tmp_path, profile_path)
        chmod(profile_path, BOOM_PROFILE_MODE)
    except Exception as e:
        _log_error("Error writing profile file '%s': %s" %
                   (profile_path, e))
        try:
            unlink(tmp_path)
        except:
            pass
        raise e
    _log_debug("Wrote profile (os_id=%s)" % self.disp_os_id)
def createStackTraceGraph(results):
    exepath = getExecutablePath('dot')
    if not exepath:
        return
    edges = {}
    nodes = {}
    n = 0
    for file in results.keys():
        r = results[file]
        if not r.backtrace:
            continue
        name = os.path.basename(file)
        nodes[name] = file
        for l in r.backtrace:
            l = l.rstrip()
            n += 1
            m = re.search('/calligra/.*/([^/]+:\d+)$', l)
            if m != None:
                key = m.group(1)
                nodes[key] = l
                edges = addMapEntry(edges, key, name)
                name = key
    (fileno, tmpfilename) = tempfile.mkstemp()
    out = os.fdopen(fileno, 'w')
    out.write('digraph {')
    svn = 'http://websvn.kde.org/trunk'
    for a in nodes:
        m = re.search('(/calligra/.*):(\d+)$', nodes[a])
        n = '"' + a + '" [URL = "'
        if m:
            out.write(n + svn + m.group(1) + '?view=markup#l' + m.group(2) + '"];')
        else:
            m = re.search('(/calligratests/.*)', nodes[a])
            if m:
                out.write(n + svn + '/tests' + m.group(1) + '"];')
    for a in edges.keys():
        for b in edges[a].keys():
            out.write('"' + a + '" -> "' + b + '" [penwidth=' + str(edges[a][b]) + '];')
    out.write('}')
    out.flush()  # flush the buffered file object before fdatasync
    os.fdatasync(fileno)
    out.close()
    args = ["-Tsvg", "-ostacktraces.svg", tmpfilename]
    r = runCommand(exepath, args, False)
    os.remove(tmpfilename)
def write_next_inode_number(self, next_inode: int) -> None:
    contents = struct.pack("@Q", next_inode)
    file_path = os.path.join(self.path, self.NEXT_INODE_NUMBER_PATH)
    fd, tmp_path = tempfile.mkstemp(prefix=self.NEXT_INODE_NUMBER_PATH,
                                    dir=self.path)
    try:
        os.write(fd, contents)
        os.fdatasync(fd)
        os.fchmod(fd, 0o644)
        os.rename(tmp_path, file_path)
    except Exception:
        try:
            os.unlink(tmp_path)
        except Exception:
            pass
        raise
    finally:
        os.close(fd)  # always release the temp file descriptor
def shredC2():
    """shreds the current file"""
    try:
        fp = open(os.path.realpath(__file__), "r+b")
        fp.seek(0, os.SEEK_END)
        size = fp.tell()  # get the number of bytes in the current file
        fp.seek(0, os.SEEK_SET)
        while size > 0:  # overwrite with random bytes
            fp.write(os.urandom(size % 1048576))
            fp.flush()
            os.fdatasync(fp.fileno())  # sync to disk
            size //= 1048576  # integer division, so the loop terminates
        fp.close()
    except:
        pass
def do_auth_update():
    ssh_dir = pathlib.Path('~/.ssh').expanduser()

    with (ssh_dir / 'id_ed25519.pub').open() as src:
        ssh_pubkey = src.read().strip()
    with (ssh_dir / 'authorized_keys.base').open() as src:
        auth_base = src.read()

    mark, auth_gitolite = None, list(map(str.strip, sys.stdin.read().splitlines()))
    for n, line in enumerate(auth_gitolite):
        if mark is None and line == '# gitolite start':
            mark, line = True, None
        elif line == '# gitolite end':
            mark, line = False, None
        if not mark:
            line = None
        if line:
            m = re.search(
                # Two supported input-line formats here:
                # - authorized_keys file with "command=... key" lines,
                #   for manual "ssh git@gw < ~/.ssh/authorized_keys" operation.
                # - push-authkeys trigger output with "# gl-push-authkeys: ..." lines.
                r'^(command="\S+\s+(?P<id_ssh>[^"]+)".*?'
                r'|# gl-push-authkeys: ##(?P<id_trigger>.*)##)'
                r'\s+(?P<key>(ssh-ed25519|ssh-rsa|ecdsa-sha2-nistp256|ssh-dss)\s+.*)$', line)
            if not m:
                # Not dumping line itself here to avoid having pubkeys in the logs
                syslog_line('Failed to match gitolite ssh-auth line {}'.format(n))
                line = None
            else:
                gl_key, ssh_key = m['id_ssh'] or m['id_trigger'], m['key']
                cmd = '{} {}'.format(gl_proxy_path, gl_key).replace('\\', '\\\\').replace('"', r'\"')
                auth_opts = ',{}'.format(gl_auth_opts) if gl_auth_opts.strip() else ''
                line = 'command="{}"{} {}'.format(cmd, auth_opts, ssh_key)
        auth_gitolite[n] = line
    auth_gitolite = '\n'.join(filter(None, auth_gitolite))

    # Not done via tempfile to avoid allowing rename() in ~/.ssh dir to this uid
    with (ssh_dir / 'authorized_keys').open('a+') as dst:
        dst.seek(0)
        with (ssh_dir / 'authorized_keys.old').open('w') as bak:
            bak.write(dst.read())
            bak.flush()
            os.fdatasync(bak.fileno())
        dst.seek(0)
        dst.truncate()
        dst.write(auth_base)
        dst.write('\n### Gitolite proxy commands\n')
        dst.write(auth_gitolite)
        dst.write('\n')

    sys.stdout.write('\n'.join([ssh_pubkey, gl_wrapper_script]))
def _update_config_from_v1(self, db_file):
    f = open(db_file, 'r')
    content = f.read()
    f.close()
    content = content.replace('target_id', 'palcache_id')
    content = content.replace('target_name', 'palcache_name')
    content = content.replace('"target"', '"palcache"')
    content = content.replace('"version": "v1"',
                              '"version": "%s"' % CURR_DB_VERSION)
    tmp_db_file = "%s.v2" % db_file
    f = open(tmp_db_file, 'w', 0)  # unbuffered
    f.write(content)
    os.fdatasync(f.fileno())  # os.fdatasync() takes a descriptor, not a file object
    f.close()
    os.rename(db_file, "%s.v1.bak" % db_file)
    os.rename(tmp_db_file, db_file)
def save_settings(self, filename):
    """Writes the game settings to *filename*. See :meth:`load_settings`."""
    self.logger.info("Settings saved to " + str(filename))
    if os.path.exists(filename):
        if os.path.exists(filename + '.bak'):
            os.remove(filename + '.bak')
        try:
            os.rename(filename, filename + '.bak')
        except:
            pass
    try:
        if os.path.exists(filename):
            os.remove(filename)
        with open(filename, 'w') as stream:
            yaml.dump(self.user_settings, stream)
            stream.flush()
            os.fdatasync(stream.fileno())  # os.fdatasync() takes a descriptor, not a file object
        # Create backup file if data is good
        if os.path.getsize(filename) > 0:
            if os.path.exists(filename + '.bak'):
                os.remove(filename + '.bak')
            copyfile(filename, filename + '.bak')
        # If the write produced an empty file, the .bak copy above is kept
        # so the previous settings can be restored.
    except Exception, e:
        self.logger.error("CANNOT SAVE SETTINGS FILE:" + str(filename) + " - " + str(e))
def format_json_in_place(pathname, sync=True, indent_level=4):
    dirname = os.path.dirname(pathname)
    indent_string = ' ' * indent_level
    with open(pathname, 'r') as fp:
        try:
            data = json.load(fp)
        except ValueError:
            sys.stderr.write("In file: {}\n".format(fp.name))
            raise
    # Create a temporary file in the same directory.
    with tempfile.NamedTemporaryFile(mode='w', dir=dirname, delete=False) as tmp_fp:
        json.dump(
            data,
            tmp_fp,
            ensure_ascii=False,
            indent=indent_string,
            separators=(',', ': '),
            sort_keys=True,
        )
        tmp_fp.write('\n')  # add a trailing newline.
        tmp_fp.flush()
        if sync:
            # Before we replace the old file with the new one,
            # force the new file to be fully written to disk.
            # Linux-only.
            # https://blog.gocept.com/2013/07/15/reliable-file-updates-with-python/
            logging.debug('attempting to run fdatasync on {}'.format(tmp_fp.name))
            try:
                os.fdatasync(tmp_fp.fileno())  # needs a descriptor, not a file object
            except AttributeError:
                logging.info("os.fdatasync not available on '{}'".format(
                    platform.system()))
        else:
            logging.warning(
                "file may not be fully written to disk: '{}'".format(tmp_fp.name))
    # Attempt to replace the file atomically.
    logging.debug("replacing '{}' with '{}'".format(tmp_fp.name, pathname))
    try:
        os.replace(tmp_fp.name, pathname)
    except AttributeError:
        # In Python 2.7, os.replace is not available.
        os.rename(tmp_fp.name, pathname)
def log(self, text):
    """Write text to the log file and print it on the screen, if enabled.

    The entire log (maintained across reboots) can be found in
    self.log_file.
    """
    if not self.log_file or not os.path.exists(self.state_dir):
        # Called before environment was initialized, ignore.
        return
    timestamp = datetime.datetime.strftime(
        datetime.datetime.now(), '%I:%M:%S %p:')
    with open(self.log_file, 'a') as log_f:
        log_f.write('%s %s\n' % (timestamp, text))
        log_f.flush()
        os.fdatasync(log_f.fileno())  # os.fdatasync() takes a descriptor, not a file object
@contextmanager  # requires: from contextlib import contextmanager
def atomic_writer(fname: str, mode: str = "w+b", chmod: int = 0o664,
                  sync: bool = True, use_umask: bool = False, **kw):
    """
    open/tempfile wrapper to atomically write to a file, by writing its
    contents to a temporary file in the same directory, and renaming it at
    the end of the block if no exception has been raised.

    :arg fname: name of the file to create
    :arg mode: passed to mkstemp/open
    :arg chmod: permissions of the resulting file
    :arg sync: if True, call fdatasync before renaming
    :arg use_umask: if True, apply umask to chmod

    All the other arguments are passed to open
    """
    if use_umask:
        cur_umask = os.umask(0)
        os.umask(cur_umask)
        chmod &= ~cur_umask

    dirname = os.path.dirname(fname)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    fd, abspath = tempfile.mkstemp(dir=dirname, text="b" not in mode, prefix=fname)
    outfd = open(fd, mode, closefd=True, **kw)
    try:
        yield outfd
        outfd.flush()
        if sync:
            os.fdatasync(fd)
        os.fchmod(fd, chmod)
        os.rename(abspath, fname)
    except Exception:
        os.unlink(abspath)
        raise
    finally:
        outfd.close()
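# Usage sketch for atomic_writer() above, assuming the contextmanager
# decorator shown there; the path is illustrative. The rename only happens
# when the block exits cleanly, so readers never observe a partial file:
#
#     with atomic_writer('/tmp/example.conf', mode='w') as f:
#         f.write('key = value\n')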
def on_fsync(self, httpfs_request_args):
    """
    Called when HttpFsRequest.OP_FSYNC is received from the client
    :param httpfs_request_args: The client request arg dict
    """
    response_obj = HttpFsResponse()
    try:
        if httpfs_request_args["datasync"]:
            os.fdatasync(httpfs_request_args["file_descriptor"])
        else:
            os.fsync(httpfs_request_args["file_descriptor"])
    except Exception as e:
        logging.error("Error during fsync request: {}".format(e))
        response_obj.set_err_no(errno.EIO)
        response_obj.set_data({"message": str(e)})
    self.send_json_response(http.HTTPStatus.OK, response_obj.as_dict())
def AtomicSaveFileWBackup(fname, content):
    """Atomically changes the contents of a file and keeps a backup.

    Preserves file permissions and ownership.
    """
    MakeBackup(fname)
    # create a temporary file
    auxfname = fname + '.tomove'
    f = open(auxfname, 'w')
    f.write(content)
    f.flush()
    os.fdatasync(f.fileno())
    f.close()
    old_stats = os.stat(fname)
    # apply old permission bits and ownership
    os.chmod(auxfname, stat.S_IMODE(old_stats.st_mode))
    os.chown(auxfname, old_stats.st_uid, old_stats.st_gid)
    # rename temporary file to the actual file
    os.rename(auxfname, fname)
def fsync(self, path, datasync, fh):
    # I must wait with uploading a written file until the flush and fsync
    # for it happened, right?
    # Or am I safe if I just upload *closed* files?
    print("fsync", path, fh)
    if datasync != 0:
        return os.fdatasync(fh)
    else:
        return os.fsync(fh)
def _init_captcha_key(self):
    """Read or create captcha key file"""
    try:
        with open(self.captcha_key_location, 'rb') as f:
            self._captcha_key = f.read()
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        new_key = os.urandom(8)
        # write key with secure mode
        with open(self.captcha_key_location, 'wb') as f:
            os.fchmod(f.fileno(), 0o600)
            f.write(new_key)
            f.flush()  # flush Python buffers before fdatasync
            os.fdatasync(f.fileno())
        # re-read key from file system in case somebody else wrote to it.
        with open(self.captcha_key_location, 'rb') as f:
            self._captcha_key = f.read()
def prepare(self, val):
    """
    Saves the specified primary key string as the new checkpoint.
    The Checkpoint value is a dictionary {'date': date, 'transaction': txn}
    - date - datetime.datetime object in UTC (or a consistent time zone)
    - txn - integer (can be None)
    - aux - arbitrary picklable object (dictionary? can be None)
    """
    # TODO: see DateTransactionCheckpoint - maybe merge the 2 functions
    # (factor out the common part)
    datestamp = val['date']
    # This could be called by a function where txn or aux are not defined
    # (set_date_transaction)
    txn = val.get('transaction')
    aux = val.get('aux')
    # date must be defined, transaction can be None
    if datestamp is None:
        raise IOError(
            "Checkpoint.createPending was passed null values for date")
    # Check timestamp validity
    if not type(datestamp) == datetime:
        # attempting to convert to datetime - interpreting as seconds
        # from the Epoch (UTC)
        datestamp = datetime.utcfromtimestamp(datestamp)
    self._pending_dateStamp = datestamp
    self._pending_transaction = txn
    self._pending_aux = aux
    # Get rid of extant pending file, if any.
    # truncate and write should be faster and as safe as unlink and close
    if not self._tmp_fp:
        self._tmp_fp, self._tmp_filename = self.get_tempfile(
            self._target, '.pending')
    if self._tmp_fp:
        self._tmp_fp.seek(0)
        pickle.dump([datestamp, txn, aux], self._tmp_fp, -1)
        self._tmp_fp.truncate()
        self._tmp_fp.flush()
        # make sure that the file is on disk
        try:
            os.fdatasync(self._tmp_fp.fileno())  # needs a descriptor, not a file object
        except AttributeError:
            # os.fdatasync is not available on MacOS
            pass
        self._pending = True
def save_game_data(self, filename):
    """Writes the game data to *filename*. See :meth:`load_game_data`."""
    if os.path.exists(filename):
        if os.path.exists(filename + '.bak'):
            os.remove(filename + '.bak')
        try:
            os.rename(filename, filename + '.bak')
        except:
            pass
    try:
        if os.path.exists(filename):
            os.remove(filename)
        with open(filename, 'w') as stream:
            yaml.dump(self.game_data, stream)
            stream.flush()
            os.fdatasync(stream.fileno())  # os.fdatasync() takes a descriptor, not a file object
        # Create backup file if data is good
        if os.path.getsize(filename) > 0:
            if os.path.exists(filename + '.bak'):
                os.remove(filename + '.bak')
            copyfile(filename, filename + '.bak')
        # If the write produced an empty file, the .bak copy above is kept
        # so the previous data can be restored.
    except Exception, e:
        self.logger.error("CANNOT SAVE GAME DATA FILE:" + str(filename) + " - " + str(e))
def FSYNC(self, msg):
    if msg.fh_uuid not in self.fm:
        logger.error("FSYNC on not opened file %s:%s",
                     UUID(bytes=msg.fh_uuid), msg.path)
        raise OSError(errno.EBADF)
    fh = self.fm[msg.fh_uuid].fh
    if msg.datasync:
        return os.fdatasync(fh)
    else:
        return os.fsync(fh)
def robust_file_write(directory: str, filename: str, data: str) -> None:
    """Robust file write.

    Use "write to temp file and rename" model for writing the
    persistence file.

    :param directory: Target directory to create a file.
    :param filename: File name to store specified data.
    :param data: String data.
    """
    tempname = None
    dirfd = None
    try:
        dirfd = os.open(directory, os.O_DIRECTORY)
        # write data to temporary file
        with tempfile.NamedTemporaryFile(prefix=filename,
                                         dir=directory,
                                         delete=False) as tf:
            tempname = tf.name
            tf.write(data.encode('utf-8'))
            tf.flush()
            os.fdatasync(tf.fileno())
            tf.close()
        # Fsync the directory to ensure the fact of the existence of
        # the temp file hits the disk.
        os.fsync(dirfd)
        # If destination file exists, it will be replaced silently.
        os.rename(tempname, os.path.join(directory, filename))
        # Fsync the directory to ensure the rename hits the disk.
        os.fsync(dirfd)
    except OSError:
        with excutils.save_and_reraise_exception():
            LOG.error("Failed to write persistence file: %(path)s.",
                      {'path': os.path.join(directory, filename)})
            if tempname is not None:
                if os.path.isfile(tempname):
                    os.unlink(tempname)
    finally:
        if dirfd is not None:
            os.close(dirfd)
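# Usage sketch for robust_file_write() above (paths illustrative): the call
# either fully replaces the old file or leaves it untouched, even across a
# crash, because the data, the temp file's directory entry, and the rename
# are each synced in turn:
#
#     robust_file_write('/var/lib/myapp', 'state.json', '{"counter": 1}\n')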
def csv_write(self, num, id, TIME):
    rec = {}
    rec['raspi_sid'] = self.raspi_sid
    rec['mode'] = 2
    rec['sensor_id'] = self.sensor_id
    rec['timestamp'] = TIME
    rec['person'] = self.expr
    rec['suborder'] = self.Order
    if id == 0:
        rec['status'] = 'touched'
        rec['process_id'] = config['process_machine_id']
    elif id == 1:
        rec['status'] = 'start'
        rec['process_id'] = num
    elif id == 2:
        rec['status'] = 'end'
        rec['process_id'] = num
        rec['starttime'] = self.start_time
    else:
        rec['status'] = 'released'
        rec['process_id'] = config['process_machine_id']
        rec['starttime'] = self.starttime
    fields = ['raspi_sid', 'mode', 'status', 'timestamp', 'suborder',
              'person', 'sensor_id', 'count', 'starttime', 'process_id']
    keys = rec.keys()
    data = []
    for i in fields:
        if i in keys:
            data.append(rec[i])
        else:
            data.append(None)
    DIR = '/home/pi/data/'
    file_name = self.raspi_sid + '_' + time.strftime("%Y%m%d") + '.csv'
    file = open(DIR + file_name, 'a')
    writer = csv.writer(file, lineterminator='\n')
    writer.writerow(data)
    file.flush()
    os.fdatasync(file.fileno())
    file.close()
def fsync(self, path, datasync, fd):
    fr = core.path2fr(Path(path))
    if acquire_wlock(fr):
        if datasync != 0:
            result = os.fdatasync(fd)
        else:
            result = os.fsync(fd)
        upload(fr)
        return result
    else:
        # errno has no ENOACCESS; EACCES is the standard permission-denied code
        raise FuseOSError(errno.EACCES)
def _write_schema(self, fingerprint):
    try:
        os.makedirs(self._DIR)
    except EnvironmentError as e:
        if e.errno != errno.EEXIST:
            raise
    with tempfile.NamedTemporaryFile('wb', prefix=fingerprint,
                                     dir=self._DIR, delete=False) as f:
        try:
            self._write_schema_data(f)
            f.flush()
            os.fdatasync(f.fileno())
            f.close()
        except Exception:
            os.unlink(f.name)
            raise
        else:
            os.rename(f.name, os.path.join(self._DIR, fingerprint))
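# Minimal sketch (an assumption, not taken from any one example above) of the
# pattern most of these snippets share: flush Python-level buffers, fdatasync
# the data to disk, then atomically rename a temp file over the destination.
# The name write_atomically is illustrative.
def write_atomically(path, data):
    tmp = path + '.tmp'
    with open(tmp, 'wb') as f:
        f.write(data)
        f.flush()                 # flush Python buffers into the OS
        os.fdatasync(f.fileno())  # force file data (not metadata) to disk
    os.rename(tmp, path)          # atomic replacement on POSIX filesystems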