def clone_touch(in_path, out_path):
    st = os.stat(in_path)
    with open(out_path, "a") as f:
        os.utime(out_path, (st.st_atime, st.st_mtime))
        os.fchown(f.fileno(), st.st_uid, st.st_gid)
        os.fchmod(f.fileno(), st.st_mode)
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string."""
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # let's see if we can grab the file and compare the contents, to avoid
    # doing a write.
    existing_content = None
    existing_uid, existing_gid = None, None
    try:
        with open(path, 'rb') as target:
            existing_content = target.read()
        stat = os.stat(path)
        existing_uid, existing_gid = stat.st_uid, stat.st_gid
    except:
        pass
    if content != existing_content:
        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
            level=DEBUG)
        with open(path, 'wb') as target:
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            if six.PY3 and isinstance(content, six.string_types):
                content = content.encode('UTF-8')
            target.write(content)
        return
    # the contents were the same, but we might still need to change the
    # ownership.
    if existing_uid != uid:
        log("Changing uid on already existing content: {} -> {}"
            .format(existing_uid, uid), level=DEBUG)
        os.chown(path, uid, -1)
    if existing_gid != gid:
        log("Changing gid on already existing content: {} -> {}"
            .format(existing_gid, gid), level=DEBUG)
        os.chown(path, -1, gid)
def fork_worker(self, wid):
    if not self.is_master:
        self.log.warn("tried to fork a worker from a worker")
        return True
    tmpfname = self.control_path(self.HEALTHFILE % wid)
    tmpfd = os.open(tmpfname, os.O_RDONLY)
    if self.worker_uid is not None:
        os.fchown(tmpfd, self.worker_uid, os.getegid())
    pid = os.fork()
    if pid and self.is_master:
        self.log.info("worker forked: %d" % pid)
        self._worker_forked(wid, pid, tmpfd, tmpfname)
        return False
    if self.workers is None:
        self.log.error("forked a worker from a worker, exiting")
        sys.exit(1)
    self._worker_postfork(wid, pid, tmpfd)
    self.server.serve()
    return True
def create(self):
    self.setup()
    self.logger.debug("%r create pidfile: %s", self, self.filename)
    self.fh = open(self.filename, 'a+')
    if self.lock_pidfile:
        try:
            fcntl.flock(self.fh.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as exc:
            self.close(cleanup=False)
            if self.allow_samepid:
                return
            raise PidFileAlreadyLockedError(exc)
    check_result = self.check()
    if check_result == PID_CHECK_SAMEPID:
        return
    if self.chmod:
        os.fchmod(self.fh.fileno(), self.chmod)
    if self.uid >= 0 or self.gid >= 0:
        os.fchown(self.fh.fileno(), self.uid, self.gid)
    self.fh.seek(0)
    self.fh.truncate()
    # pidfile must consist of the pid and a newline character
    self.fh.write("%d\n" % self.pid)
    self.fh.flush()
    self.fh.seek(0)
    atexit.register(self.close)
def restore_attrs(self, path, item, symlink=False, fd=None):
    xattrs = item.get(b'xattrs')
    if xattrs:
        for k, v in xattrs.items():
            try:
                xattr.setxattr(fd or path, k, v)
            except OSError as e:
                if e.errno != errno.ENOTSUP:
                    raise
    uid = gid = None
    if not self.numeric_owner:
        uid = user2uid(item[b'user'])
        gid = group2gid(item[b'group'])
    uid = uid or item[b'uid']
    gid = gid or item[b'gid']
    # This code is a bit of a mess due to os specific differences
    try:
        if fd:
            os.fchown(fd, uid, gid)
        else:
            os.lchown(path, uid, gid)
    except OSError:
        pass
    if fd:
        os.fchmod(fd, item[b'mode'])
    elif not symlink:
        os.chmod(path, item[b'mode'])
    elif has_lchmod:  # Not available on Linux
        os.lchmod(path, item[b'mode'])
    if fd and utime_supports_fd:  # Python >= 3.3
        os.utime(fd, None, ns=(item[b'mtime'], item[b'mtime']))
    elif utime_supports_fd:  # Python >= 3.3
        os.utime(path, None, ns=(item[b'mtime'], item[b'mtime']),
                 follow_symlinks=False)
    elif not symlink:
        os.utime(path, (item[b'mtime'] / 10**9, item[b'mtime'] / 10**9))
def AtomicCreateFile(file_path, binary=False, perm=None, uidgid=None):
    """Open a temporary file for writing, rename to final name when done"""
    tmp_file_path = file_path + "~"
    mode = "wb" if binary else "w"
    # copy permissions and ownership from existing file
    if perm is None or uidgid is None:
        try:
            st = os.stat(file_path)
        except FileNotFoundError:
            st = None
        if perm is None:
            perm = st.st_mode if st else 0o600
        if uidgid is None and st is not None:
            uidgid = (st.st_uid, st.st_gid)
    fd = os.open(tmp_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, perm)
    try:
        if uidgid:
            os.fchown(fd, uidgid[0], uidgid[1])
        with os.fdopen(fd, mode) as out_file:
            yield out_file
        os.rename(tmp_file_path, file_path)
    except:
        try:
            os.unlink(tmp_file_path)
        except:  # pylint: disable=bare-except
            pass
        raise
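# Usage note (added for illustration, not from the original source):
# AtomicCreateFile is a generator, so it is presumably wrapped with
# contextlib.contextmanager elsewhere in its code base. A minimal sketch of
# how it would then be used, assuming that decorator and a hypothetical path:
#
#     with AtomicCreateFile("/etc/example.conf", perm=0o644) as out_file:
#         out_file.write("key = value\n")
#
# The write goes to "/etc/example.conf~" and the real file is only replaced by
# the rename on a clean exit, so readers never see a half-written file.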
def save_container(container, path):
    temp = PersistentTemporaryFile(
        prefix=('_' if iswindows else '.'),
        suffix=os.path.splitext(path)[1],
        dir=os.path.dirname(path))
    if hasattr(os, 'fchmod'):
        # Ensure file permissions and owner information is preserved
        fno = temp.fileno()
        try:
            st = os.stat(path)
        except EnvironmentError as err:
            if err.errno != errno.ENOENT:
                raise
            # path may not exist if we are saving a copy, in which case we use
            # the metadata from the original book
            st = os.stat(container.path_to_ebook)
        os.fchmod(fno, st.st_mode)
        os.fchown(fno, st.st_uid, st.st_gid)
    temp.close()
    temp = temp.name
    try:
        container.commit(temp)
        atomic_rename(temp, path)
    finally:
        if os.path.exists(temp):
            os.remove(temp)
def request_token(self, uid):
    pw_user = pwd.getpwuid(uid)
    if pw_user is None:
        self.write_error("User not found")
        return
    polo_dir = os.path.join(pw_user.pw_dir, ".polo")
    if not os.path.exists(polo_dir):
        os.mkdir(polo_dir)
        os.chown(polo_dir, pw_user.pw_uid, pw_user.pw_gid)
    if not os.path.isfile(os.path.join(polo_dir, "token")):
        try:
            f = open(os.path.join(polo_dir, "token"), "wb")
            os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)
            os.fchown(f.fileno(), pw_user.pw_uid, pw_user.pw_gid)
            f.write(tokenprovider.create_token(uid, self.secret))
            f.close()
        except Exception as e:
            self.write_error(str(e))
            return
        self.write_ok(0)
    else:
        self.write_ok(1)
def serialize(self):
    """ Serialize internal data structures into the configuration file. """
    assert ltrace(globals()['TRACE_' + self.name.upper()], '| serialize()')
    with self.lock:
        if self.changed:
            try:
                # raise the hint level, so as the MOVED_TO event will make it
                # just back to the state where only ONE event will suffice to
                # trigger it again.
                self.conf_hint += 1
            except:
                pass
            #print '>> serialize', self.conf_hint
            ftemp, fpath = tempfile.mkstemp(dir=settings.config_dir)
            os.write(ftemp, '%s\n' % '\n'.join(sorted(self.iterkeys())))
            os.fchmod(ftemp, 0644)
            os.fchown(ftemp, 0, 0)
            os.close(ftemp)
            os.rename(fpath, self.conf_file)
            self.changed = False
def _cache_open(pathname, mode):
    f = open(pathname, mode)
    if util.via_sudo():
        uid = int(os.environ['SUDO_UID'])
        gid = int(os.environ['SUDO_GID'])
        os.fchown(f.fileno(), uid, gid)
    return f
def do_fchown(fd, uid, gid):
    try:
        os.fchown(fd, uid, gid)
    except OSError as err:
        raise SwiftOnFileSystemOSError(
            err.errno, '%s, os.fchown(%s, %s, %s)' % (
                err.strerror, fd, uid, gid))
def write_from_dict(filepath, d, validate=lambda x, y: True):
    """Takes a dictionary and appends the contents to a file

    An optional validation callback can be passed to perform validation on
    each value in the dictionary.

    e.g.

    def validation_callback(dictionary_key, dictionary_value):
        if not dictionary_value.isdigit():
            raise Exception('value contains non-digit character(s)')

    Any callback supplied to this function should raise an exception if
    validation fails.
    """
    # Sort items asciibetically
    # the output of the deployment should not depend
    # on the locale of the machine running the deployment
    items = sorted(d.iteritems(), key=lambda (k, v): [ord(c) for c in v])

    for (k, v) in items:
        validate(k, v)

    with open(filepath, 'a') as f:
        for (_, v) in items:
            f.write('%s\n' % v)
        os.fchown(f.fileno(), 0, 0)
        os.fchmod(f.fileno(), 0644)
def do_fchown(fd, uid, gid):
    try:
        os.fchown(fd, uid, gid)
    except OSError as err:
        logging.exception("fchown failed on %d err: %s", fd, err.strerror)
        raise
    return True
def save_container(container, path):
    temp = PersistentTemporaryFile(
        prefix=('_' if iswindows else '.'),
        suffix=os.path.splitext(path)[1],
        dir=os.path.dirname(path))
    if hasattr(os, 'fchmod'):
        # Ensure file permissions and owner information is preserved
        fno = temp.fileno()
        try:
            st = os.stat(path)
        except EnvironmentError as err:
            if err.errno != errno.ENOENT:
                raise
            # path may not exist if we are saving a copy, in which case we use
            # the metadata from the original book
            st = os.stat(container.path_to_ebook)
        os.fchmod(fno, st.st_mode)
        try:
            os.fchown(fno, st.st_uid, st.st_gid)
        except EnvironmentError as err:
            if err.errno != errno.EPERM:
                # ignore chown failure as user could be editing file belonging
                # to a different user, in which case we really cant do anything
                # about it short of making the file update non-atomic
                raise
    temp.close()
    temp = temp.name
    try:
        container.commit(temp)
        atomic_rename(temp, path)
    finally:
        if os.path.exists(temp):
            os.remove(temp)
def _cache_open(pathname, mode):
    f = open(pathname, mode)
    if 'SUDO_UID' in os.environ and 'SUDO_GID' in os.environ:
        uid = int(os.environ['SUDO_UID'])
        gid = int(os.environ['SUDO_GID'])
        os.fchown(f.fileno(), uid, gid)
    return f
def acquire(self):
    if self.fd:
        return
    if self.has_exlock():
        raise LockfileLockedError(
            'Lock file already locked by PID %s' % self.get_pid())
    pwent = pwd.getpwnam(self.user)
    if not os.path.exists(self.path):
        os.mkdir(self.path)
        os.chown(self.path, pwent.pw_uid, pwent.pw_gid)
        os.chmod(self.path, 0o755)
    self.fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.fchown(self.fd, pwent.pw_uid, pwent.pw_gid)
    os.fchmod(self.fd, 0o644)
    try:
        self.lock = fcntl.lockf(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except:
        os.close(self.fd)
        self.fd = None
        raise LockfileEstablishError(
            "Could not establish lock %s" % self.filename)
    os.write(self.fd, "%s\n" % os.getpid())
def acquire(self): """Acquire an exclusive lock Returns immediately if lock was already acquired within this process. Raises: LockfileLockedError: The lockfile is in use by another process. LockfileEstablishError: The lock could not be established for some other reason. """ if self.fd: return if self.has_exlock(): raise LockfileLockedError( 'Lock file already locked by PID %s' % self.get_pid()) pwent = pwd.getpwnam(self.user) if not os.path.exists(self.dir): os.mkdir(self.dir) os.chown(self.dir, pwent.pw_uid, pwent.pw_gid) os.chmod(self.dir, 0o755) self.fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.fchown(self.fd, pwent.pw_uid, pwent.pw_gid) os.fchmod(self.fd, 0o644) try: self.lock = fcntl.lockf(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except: os.close(self.fd) self.fd = None raise LockfileEstablishError("Could not establish lock %s" % self.filename) os.write(self.fd, "%s\n" % os.getpid())
def lock(file_dir):
    ruid = 0
    rgid = 0
    try:
        ruid = pwd.getpwnam("root").pw_uid
    except KeyError:
        pass
    try:
        rgid = grp.getgrnam("root").gr_gid
    except KeyError:
        pass
    try:
        with open(file_dir) as f:
            try:
                os.fchmod(f.fileno(), 0444)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
            try:
                os.fchown(f.fileno(), ruid, -1)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
            try:
                os.fchown(f.fileno(), -1, rgid)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
    except IOError as e:
        if e.errno != errno.EISDIR:
            raise
        else:
            try:
                os.chmod(file_dir, 0555)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
            try:
                os.chown(file_dir, ruid, -1)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
            try:
                os.chown(file_dir, -1, rgid)
            except OSError as e:
                if e.errno != errno.EPERM:
                    raise
def write_file(fn, contents, perms=None, owner=None, group=None):
    if perms:
        perms2 = perms
    else:
        perms2 = 0o666
    fd = os.open(fn, os.O_CREAT | os.O_RDWR, perms2)
    # Bypass umask
    if perms:
        os.fchmod(fd, perms)
    if owner or group:
        if owner:
            pw = pwd.getpwnam(owner)
            uid = pw.pw_uid
        else:
            uid = -1
        if group:
            gr = grp.getgrnam(group)
            gid = gr.gr_gid
        else:
            gid = -1
        os.fchown(fd, uid, gid)
    f = os.fdopen(fd, 'w')
    f.write(contents)
    f.close()
def install_file(contents, dest, owner="root", group="root", mode=0600):
    uid = getpwnam(owner)[2]
    gid = getgrnam(group)[2]
    dest_fd = os.open(dest, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, mode)
    os.fchown(dest_fd, uid, gid)
    with os.fdopen(dest_fd, 'w') as destfile:
        destfile.write(str(contents))
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        # something went wrong, reset
        self.lines = None
        self.stat = None
        return
    directory, prefix = os.path.split(self.filename)
    # use tempfile in same directory to have atomic rename
    fd, name = tempfile.mkstemp(prefix=prefix, dir=directory, text=True)
    with io.open(fd, mode='w', closefd=True) as f:
        for line in self.lines:
            if not isinstance(line, six.text_type):
                line = line.decode('utf-8')
            f.write(line)
        self.lines = None
        os.fchmod(f.fileno(), stat.S_IMODE(self.stat.st_mode))
        os.fchown(f.fileno(), self.stat.st_uid, self.stat.st_gid)
        self.stat = None
        # flush and sync tempfile inode
        f.flush()
        os.fsync(f.fileno())
    # rename file and sync directory inode
    os.rename(name, self.filename)
    dirfd = os.open(directory, os.O_RDONLY | os.O_DIRECTORY)
    try:
        os.fsync(dirfd)
    finally:
        os.close(dirfd)
def chown_and_chmod(f, mode, uid=-1, group=-1):
    """Change group and permissions of a file handle or file path

    Parameters
    ----------
    f : file handle or string

    mode : binary

    uid : None or int
        None or -1 does not set uid

    group : None, int, or string
        If None or -1, do not set gid. If int or string, set appropriate gid:
        string is interpreted as group name, which must be converted by OS to
        GID, while int is interpreted directly as a GID.

    """
    if group is None:
        gid = -1
    elif isinstance(group, basestring):
        gid = get_gid(group)
    elif isinstance(group, int):
        gid = group
    else:
        raise TypeError('Invalid `group`: %s, type %s' % (group, type(group)))

    if uid is None:
        uid = -1

    if isinstance(f, file):
        os.fchown(f.fileno(), uid, gid)
        os.fchmod(f.fileno(), mode)
    elif isinstance(f, basestring):
        os.chown(f, uid, gid)
        os.chmod(f, mode)
    else:
        raise TypeError('Unhandled type for arg `f`: %s' % type(f))
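# Usage sketch (added for illustration, not from the original source): both
# call forms accepted by chown_and_chmod under the Python 2 semantics it
# assumes (`file`, `basestring`), with a hypothetical group name "staff":
#
#     with open("/tmp/example.cfg", "w") as fh:
#         chown_and_chmod(fh, 0o640, group="staff")       # file-handle form
#     chown_and_chmod("/tmp/example.cfg", 0o640, group="staff")  # path form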
def down(queue, user=None, group=None, mode=None, host=None):
    '''Down a queue, by creating a down file'''
    # default our owners and mode
    user, group, mode = _dflts(user, group, mode)
    down_path = fsq_path.down(queue, host=host)
    fd = None
    created = False
    try:
        # try to guarantee creation
        try:
            fd = os.open(down_path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, mode)
            created = True
        except (OSError, IOError, ) as e:
            if e.errno != errno.EEXIST:
                raise e
            fd = os.open(down_path, os.O_CREAT|os.O_WRONLY, mode)
        if user is not None or group is not None:
            os.fchown(fd, *uid_gid(user, group, fd=fd))
        if not created:
            os.fchmod(fd, mode)
    except (OSError, IOError, ) as e:
        if created:
            _cleanup(down_path, e)
        _raise(down_path, e)
    finally:
        if fd is not None:
            os.close(fd)
def do_fchown(fd, uid, gid):
    try:
        # TODO: grab path name from fd, chown that
        os.fchown(fd, uid, gid)
    except IOError as err:
        raise SwiftOnFileSystemOSError(
            err.errno, '%s, os.fchown(%s, %s, %s)' % (
                err.strerror, fd, uid, gid))
def make_lingua(fname):
    """ Changes a given file to the unix group 'lingua' """
    import grp
    fd = os.open(fname, os.O_RDONLY)
    os.fchown(fd, -1, grp.getgrnam('lingua').gr_gid)
    os.close(fd)
def fchown(space, w_fd, uid, gid):
    """Change the owner and group id of the file given by file descriptor fd
    to the numeric uid and gid."""
    fd = space.c_filedescriptor_w(w_fd)
    try:
        os.fchown(fd, uid, gid)
    except OSError as e:
        raise wrap_oserror(space, e)
def _open(self):
    file = logging.handlers.RotatingFileHandler._open(self)
    if self.uid is not None:
        os.fchown(file.fileno(), self.uid, -1)
    if self.gid is not None:
        os.fchown(file.fileno(), -1, self.gid)
    #print "log file:", file.name, self.uid, self.gid, os.fstat(file.fileno())
    return file
def newServerKeys(path, keyid):
    skey = JWK(generate='RSA', use='sig', kid=keyid)
    ekey = JWK(generate='RSA', use='enc', kid=keyid)
    with open(path, 'w') as f:
        os.fchmod(f.fileno(), 0o600)
        os.fchown(f.fileno(), 0, 0)
        f.write('[%s,%s]' % (skey.export(), ekey.export()))
    return [skey.get_op_key('verify'), ekey.get_op_key('encrypt')]
def _daemonize(self):
    """Fork into a background process and setup the process, copied in part
    from http://www.jejik.com/files/examples/daemon3x.py

    """
    LOGGER.info('Forking %s into the background', sys.argv[0])

    # Write the pidfile if current uid != final uid
    if os.getuid() != self.uid:
        fd = open(self.pidfile_path, 'w')
        os.fchmod(fd.fileno(), 0o644)
        os.fchown(fd.fileno(), self.uid, self.gid)
        fd.close()

    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as error:
        raise OSError('Could not fork off parent: %s', error)

    # Set the user id
    if self.uid != os.getuid():
        os.setuid(self.uid)

    # Set the group id
    if self.gid != os.getgid():
        try:
            os.setgid(self.gid)
        except OSError as error:
            LOGGER.error('Could not set group: %s', error)

    # Decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0o022)

    # Fork again
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as error:
        raise OSError('Could not fork child: %s', error)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # Automatically call self._remove_pidfile when the app exits
    atexit.register(self._remove_pidfile)
    self._write_pidfile()
def copy_file(self, source, dest, restore=None, sourceChecksum=None):
    try:
        # represents max buffer size
        BUF_MAX = 16 * 1024  # so we don't get stuck on I/O ops
        errfile = None
        src = open(source, 'rb')
        total = os.path.getsize(source)
        current = 0
        dst = open(dest, 'wb')
        while True:
            read = src.read(BUF_MAX)
            if(read):
                dst.write(read)
            else:
                break
        src.close()
        if(errfile):
            # Remove aborted file (avoid corruption)
            dst.close()
            os.remove(errfile)
        else:
            fd = dst.fileno()
            if(self.preserve_perms):
                # set permissions
                finfo = os.stat(source)
                owner = finfo[stat.ST_UID]
                group = finfo[stat.ST_GID]
                os.fchown(fd, owner, group)
                dst.flush()
                os.fsync(fd)
                dst.close()
                if(self.preserve_times):
                    finfo = os.stat(source)
                    atime = finfo[stat.ST_ATIME]
                    mtime = finfo[stat.ST_MTIME]
                    os.utime(dest, (atime, mtime))
            else:
                dst.flush()
                os.fsync(fd)
                dst.close()
        if(self.postcheck):
            file1 = ''
            if (sourceChecksum is not None):
                file1 = sourceChecksum
            else:
                file1 = self.get_checksum(source, restore)
            file2 = self.get_checksum(dest, restore)
            if(file1 not in file2):
                print _("Checksum Mismatch:") + " [" + file1 + "] [" + file2 + "]"
                self.errors.append([source, _("Checksum Mismatch")])
    except OSError as bad:
        if(len(bad.args) > 2):
            print "{" + str(bad.args[0]) + "} " + bad.args[1] + " [" + bad.args[2] + "]"
            self.errors.append([bad.args[2], bad.args[1]])
        else:
            print "{" + str(bad.args[0]) + "} " + bad.args[1] + " [" + source + "]"
            self.errors.append([source, bad.args[1]])
def test_fchown(self):
    os = self.posix
    f = open(self.path, "w")
    os.fchown(f.fileno(), os.getuid(), os.getgid())
    f.close()
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("config_file", type=argparse.FileType("r"),
                            help="path to a configuration file")
    arg_parser.add_argument("--debug", default=False, action="store_true",
                            help="enable debug logging")
    args = arg_parser.parse_args()

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(level=level)

    parser = configparser.RawConfigParser()
    parser.readfp(args.config_file)
    fetcher_config = dict(parser.items("secret-fetcher"))

    cfg = config.parse_config(fetcher_config, {
        "vault": {
            "url": config.String,
            "role": config.String,
        },
        "output": {
            "path": config.Optional(config.String,
                                    default="/var/local/secrets.json"),
            "owner": config.Optional(user_name_to_uid, default=0),
            "group": config.Optional(group_name_to_gid, default=0),
            "mode": config.Optional(octal_integer, default=0o400),
        },
        "secrets": config.Optional(config.TupleOf(config.String), default=[]),
    })

    # pylint: disable=no-member
    client_factory = VaultClientFactory(cfg.vault.url, cfg.vault.role)

    while True:
        client = client_factory.get_client()
        secrets = {}
        soonest_expiration = client.token_expiration
        for secret_name in cfg.secrets:
            secrets[secret_name], expiration = client.get_secret(secret_name)
            soonest_expiration = min(soonest_expiration, expiration)

        with open(cfg.output.path, "w") as f:
            os.fchown(f.fileno(), cfg.output.owner, cfg.output.group)
            os.fchmod(f.fileno(), cfg.output.mode)
            json.dump({
                "vault_token": client.token,
                "secrets": secrets,
            }, f, indent=2, sort_keys=True)

        time_til_expiration = soonest_expiration - datetime.datetime.utcnow()
        time_to_sleep = time_til_expiration - VAULT_TOKEN_PREFETCH_TIME
        time.sleep(max(int(time_to_sleep.total_seconds()), 1))
if cli_options.kill:
    try:
        with open(pid_file, "r") as f:
            pid = f.readline().strip()
            if not pid:
                raise IOError("no pid in pid file")
            log.info("sending SIGTERM to pid %s" % pid)
            os.kill(int(pid), signal.SIGTERM)
            exit(0)
    except IOError:
        log.warning("Couldn't find pidfile or pid file was empty. Please \
manually find and kill any existing focus.py process")
        exit(1)

with open(pid_file, "w") as f:
    # Drop ownership of the pidfile
    os.fchown(f.fileno(), get_unprivileged_uid(), -1)
    f.write(str(os.getpid()))

atexit.register(clean_up_pid)

config.update(load_config(config_file))

# Bind our socket before we do pretty much anything, this means we can drop
# privileges early, which is a necessity before we start logging
#
# create our main server socket
try:
    log.info("binding to %s:%d", config["bind_ip"], config["bind_port"])
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.setblocking(0)
    server.bind((config["bind_ip"], config["bind_port"]))

    # We're done doing things that need root, drop our privileges
            tries,
        ) + tuple(args))
        tmp_name = os.path.join(fsq_path.tmp(trg_queue), item_name)
        trg_fd = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_EXCL, mode)
    except ( OSError, IOError, ), e:
        if isinstance(e, FSQError):
            raise e
        raise FSQEnqueueError(e.errno, wrap_io_os_err(e))
    try:
        if user is not None or group is not None:
            # set user/group ownership for file; man 2 fchown
            os.fchown(trg_fd, *uid_gid(user, group, fd=trg_fd))
        with closing(os.fdopen(trg_fd, 'wb', 1)) as trg_file:
            # i/o time ... assume line-buffered
            while True:
                if real_file:
                    reads, dis, card = select.select([src_file], [], [])
                    try:
                        msg = os.read(reads[0].fileno(), 2048)
                        if 0 == len(msg):
                            break
                    except ( OSError, IOError, ), e:
                        if e.errno in ( errno.EWOULDBLOCK,
def __setup_softhsm(self):
    token_dir_exists = os.path.exists(paths.DNSSEC_TOKENS_DIR)

    # create dnssec directory
    if not os.path.exists(paths.IPA_DNSSEC_DIR):
        logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
        os.mkdir(paths.IPA_DNSSEC_DIR)
        os.chmod(paths.IPA_DNSSEC_DIR, 0o770)
        # chown ods:named
        os.chown(paths.IPA_DNSSEC_DIR, self.ods_uid, self.named_gid)

    # setup softhsm2 config file
    softhsm_conf_txt = ("# SoftHSM v2 configuration file \n"
                        "# File generated by IPA instalation\n"
                        "directories.tokendir = %(tokens_dir)s\n"
                        "objectstore.backend = file") % {
                            'tokens_dir': paths.DNSSEC_TOKENS_DIR
                        }
    logger.debug("Creating new softhsm config file")
    with open(paths.DNSSEC_SOFTHSM2_CONF, 'w') as f:
        os.fchmod(f.fileno(), 0o644)
        f.write(softhsm_conf_txt)

    # setting up named and ipa-dnskeysyncd to use our softhsm2 and
    # openssl configs
    self.setup_named_openssl_conf()
    self.setup_named_sysconfig()
    self.setup_ipa_dnskeysyncd_sysconfig()

    if (token_dir_exists and os.path.exists(paths.DNSSEC_SOFTHSM_PIN)
            and os.path.exists(paths.DNSSEC_SOFTHSM_PIN_SO)):
        # there is initialized softhsm
        return

    # remove old tokens
    if token_dir_exists:
        logger.debug('Removing old tokens directory %s',
                     paths.DNSSEC_TOKENS_DIR)
        shutil.rmtree(paths.DNSSEC_TOKENS_DIR)

    # create tokens subdirectory
    logger.debug('Creating tokens %s directory', paths.DNSSEC_TOKENS_DIR)
    # sticky bit is required by daemon
    os.mkdir(paths.DNSSEC_TOKENS_DIR)
    os.chmod(paths.DNSSEC_TOKENS_DIR, 0o770 | stat.S_ISGID)
    # chown to ods:named
    os.chown(paths.DNSSEC_TOKENS_DIR, self.ods_uid, self.named_gid)

    # generate PINs for softhsm
    pin_length = 30  # Bind allows max 32 bytes including ending '\0'
    pin = ipautil.ipa_generate_password(
        entropy_bits=0, special=None, min_len=pin_length)
    pin_so = ipautil.ipa_generate_password(
        entropy_bits=0, special=None, min_len=pin_length)

    logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
    with open(paths.DNSSEC_SOFTHSM_PIN, 'w') as f:
        # chown to ods:named
        os.fchown(f.fileno(), self.ods_uid, self.named_gid)
        os.fchmod(f.fileno(), 0o660)
        f.write(pin)

    logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
    with open(paths.DNSSEC_SOFTHSM_PIN_SO, 'w') as f:
        # owner must be root
        os.fchmod(f.fileno(), 0o400)
        f.write(pin_so)

    # initialize SoftHSM
    command = [
        paths.SOFTHSM2_UTIL,
        '--init-token',
        '--free',  # use random free slot
        '--label', SOFTHSM_DNSSEC_TOKEN_LABEL,
        '--pin', pin,
        '--so-pin', pin_so,
    ]
    logger.debug("Initializing tokens")
    os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
    ipautil.run(command, nolog=(pin, pin_so,))
def fchown(fd, uid, gid):
    # Change the ownership of a file: this function changes the user id and
    # group id of the file referred to by the file descriptor fd.
    os.fchown(fd, uid, gid)
def init_client_mmap(mmap_group=None, socket_filename=None, size=128*1024*1024, filename=None):
    """
    Initializes an mmap area, writes the token in it and returns:
        (success flag, mmap_area, mmap_size, temp_file, mmap_filename)
    The caller must keep hold of temp_file to ensure it does not get deleted!
    This is used by the client.
    """
    def rerr():
        return False, False, None, 0, None, None
    log("init_mmap%s", (mmap_group, socket_filename, size, filename))
    mmap_filename = filename
    mmap_temp_file = None
    delete = True
    def validate_size(size):
        assert size>=64*1024*1024, "mmap size is too small: %sB (minimum is 64MB)" % std_unit(size)
        assert size<=4*1024*1024*1024, "mmap is too big: %sB (maximum is 4GB)" % std_unit(size)
    try:
        import mmap
        unit = max(4096, mmap.PAGESIZE)
        #add 8 bytes for the mmap area control header zone:
        mmap_size = roundup(size + 8, unit)
        if WIN32:
            validate_size(mmap_size)
            if not filename:
                from xpra.os_util import get_hex_uuid
                filename = "xpra-%s" % get_hex_uuid()
            mmap_filename = filename
            mmap_area = mmap.mmap(0, mmap_size, filename)
            #not a real file:
            delete = False
            mmap_temp_file = None
        else:
            assert POSIX
            if filename:
                if os.path.exists(filename):
                    fd = os.open(filename, os.O_EXCL | os.O_RDWR)
                    mmap_size = os.path.getsize(mmap_filename)
                    validate_size(mmap_size)
                    #mmap_size = 4*1024*1024    #size restriction needed with ivshmem
                    delete = False
                    log.info("Using existing mmap file '%s': %sMB", mmap_filename, mmap_size//1024//1024)
                else:
                    validate_size(mmap_size)
                    import errno
                    flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
                    try:
                        fd = os.open(filename, flags)
                        mmap_temp_file = None   #os.fdopen(fd, 'w')
                        mmap_filename = filename
                    except OSError as e:
                        if e.errno == errno.EEXIST:
                            log.error("Error: the mmap file '%s' already exists", filename)
                            return rerr()
                        raise
            else:
                validate_size(mmap_size)
                import tempfile
                from xpra.platform.paths import get_mmap_dir
                mmap_dir = get_mmap_dir()
                subs = os.environ.copy()
                subs.update({
                    "UID"   : os.getuid(),
                    "GID"   : os.getgid(),
                    "PID"   : os.getpid(),
                    })
                mmap_dir = shellsub(mmap_dir, subs)
                if mmap_dir and not os.path.exists(mmap_dir):
                    os.mkdir(mmap_dir, 0o700)
                if not mmap_dir or not os.path.exists(mmap_dir):
                    raise Exception("mmap directory %s does not exist!" % mmap_dir)
                #create the mmap file, the mkstemp that is called via NamedTemporaryFile ensures
                #that the file is readable and writable only by the creating user ID
                try:
                    temp = tempfile.NamedTemporaryFile(prefix="xpra.", suffix=".mmap", dir=mmap_dir)
                except OSError as e:
                    log.error("Error: cannot create mmap file:")
                    log.error(" %s", e)
                    return rerr()
                #keep a reference to it so it does not disappear!
                mmap_temp_file = temp
                mmap_filename = temp.name
                fd = temp.file.fileno()
            #set the group permissions and gid if the mmap-group option is specified
            mmap_group = (mmap_group or "")
            if POSIX and mmap_group and mmap_group not in FALSE_OPTIONS:
                group_id = None
                if mmap_group=="SOCKET":
                    group_id = get_socket_group(socket_filename)
                elif mmap_group.lower()=="auto":
                    group_id = xpra_group()
                    if not group_id and socket_filename:
                        group_id = get_socket_group(socket_filename)
                elif mmap_group.lower() in TRUE_OPTIONS:
                    log.info("parsing legacy mmap-group value '%s' as 'auto'", mmap_group)
                    log.info(" please update your configuration")
                    group_id = xpra_group() or get_socket_group(socket_filename)
                else:
                    group_id = get_group_id(mmap_group)
                if group_id>0:
                    log("setting mmap file %s to group id=%i", mmap_filename, group_id)
                    try:
                        os.fchown(fd, -1, group_id)
                    except OSError as e:
                        log("fchown(%i, %i, %i) on %s", fd, -1, group_id, mmap_filename, exc_info=True)
                        log.error("Error: failed to change group ownership of mmap file to '%s':", mmap_group)
                        log.error(" %s", e)
                    from stat import S_IRUSR, S_IWUSR, S_IRGRP, S_IWGRP
                    os.fchmod(fd, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
            log("using mmap file %s, fd=%s, size=%s", mmap_filename, fd, mmap_size)
            os.lseek(fd, mmap_size-1, os.SEEK_SET)
            assert os.write(fd, b'\x00')
            os.lseek(fd, 0, os.SEEK_SET)
            mmap_area = mmap.mmap(fd, length=mmap_size)
        return True, delete, mmap_area, mmap_size, mmap_temp_file, mmap_filename
    except Exception as e:
        log("failed to setup mmap: %s", e, exc_info=True)
        log.error("Error: mmap setup failed:")
        log.error(" %s", e)
        clean_mmap(mmap_filename)
        return rerr()
def set_perms(fileno):
    # type: (int) -> None
    os.fchown(fileno, 0, self.dovemail_gid)
    os.fchmod(fileno, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP)
def setup_named_conf(self, backup=False):
    """Create, update, or migrate named configuration files

    The method is used by installer and upgrade process. The named.conf is
    backed up the first time and overwritten every time. The user specific
    config files are created once and not modified in subsequent calls.
    The "dnssec-validation" option is migrated.

    :returns: True if any config file was modified, else False
    """
    # files are owned by root:named and are readable by user and group
    uid = 0
    gid = pwd.getpwnam(constants.NAMED_USER).pw_gid
    mode = 0o640

    changed = False

    if not self.fstore.has_file(paths.NAMED_CONF):
        self.fstore.backup_file(paths.NAMED_CONF)

    # named.conf
    txt = ipautil.template_file(
        os.path.join(paths.NAMED_CONF_SRC), self.sub_dict)
    with open(paths.NAMED_CONF) as f:
        old_txt = f.read()
    if txt == old_txt:
        logger.debug("%s is unmodified", paths.NAMED_CONF)
    else:
        if backup:
            if not os.path.isfile(paths.NAMED_CONF_BAK):
                shutil.copyfile(paths.NAMED_CONF, paths.NAMED_CONF_BAK)
                logger.info("created backup %s", paths.NAMED_CONF_BAK)
            else:
                logger.warning("backup %s already exists",
                               paths.NAMED_CONF_BAK)
        with open(paths.NAMED_CONF, "w") as f:
            os.fchmod(f.fileno(), mode)
            os.fchown(f.fileno(), uid, gid)
            f.write(txt)
        logger.info("created new %s", paths.NAMED_CONF)
        changed = True

    # user configurations
    user_configs = (
        (paths.NAMED_CUSTOM_CONF_SRC, paths.NAMED_CUSTOM_CONF),
        (paths.NAMED_CUSTOM_OPTIONS_CONF_SRC, paths.NAMED_CUSTOM_OPTIONS_CONF)
    )
    for src, dest in user_configs:
        if not os.path.exists(dest):
            txt = ipautil.template_file(src, self.sub_dict)
            with open(dest, "w") as f:
                os.fchmod(f.fileno(), mode)
                os.fchown(f.fileno(), uid, gid)
                f.write(txt)
            logger.info("created named user config '%s'", dest)
            changed = True
        else:
            logger.info("named user config '%s' already exists", dest)

    return changed
def does_stuff():
    # xxx not really a test, just checks that it is callable
    fd = os.open(tmpfile1, os.O_WRONLY | os.O_CREAT, 0777)
    os.fchown(fd, os.getuid(), os.getgid())
    os.close(fd)
def patch(files, qtDirPath, where):
    if not os.path.isdir(qtDirPath):
        print(qtDirPath, "does not exist")
        sys.exit(-1)
    if os.path.exists(files):
        with open(files, "r") as fn:
            filesToPatch = fn.read().split()
    else:
        filesToPatch = []
        patterns = files.split(",")
        for dir_path, sub_dirs, subfiles in os.walk(where):
            for pat in patterns:
                for fn in fnmatch.filter(subfiles, pat):
                    filesToPatch.append(os.path.join(dir_path, fn))

    # Make all paths relative to the patched file.
    # PortableExecutables have file size encoded in header.
    # Instead of modifying the header we replace by a string
    # of exactly the same size using padding "/".
    replacement = bytearray(".." + "/"*(len(qtDirPath)-2))
    qtDirPathA = bytearray(qtDirPath)
    qtDirPathA2 = bytearray(qtDirPath.replace("\\", "/"))

    patches = 0  # a counter
    print("about to try to patch", len(filesToPatch), "files")
    for f in filesToPatch:
        prefix = "" + qtDirPath
        f = os.path.join(prefix, f)
        print("patch file", f)
        if not os.path.exists(f):
            print("qpatch: warning: file not found", f)
            continue
        source = None
        stat = None
        with open(f, "rb") as file_:
            source = bytearray(file_.read())
            # store permissions
            stat = os.fstat(file_.fileno())
        if source.find(qtDirPathA) == -1 and source.find(qtDirPathA2) == -1:
            print("string not found")
            continue
        # make backup, if backup already exists, skip the patching.
        if not os.path.exists(f + "_bkp"):
            shutil.move(f, f + "_bkp")
        else:
            print("backup already exists, ignoring")
            continue
        patched = source.replace(qtDirPathA, replacement)
        patched = patched.replace(qtDirPathA2, replacement)
        with open(f, "wb") as out_:
            out_.write(patched)
            # restore permissions
            try:
                os.fchmod(out_.fileno(), stat.st_mode)
                os.fchown(out_.fileno(), stat.st_uid, stat.st_gid)
            except Exception as e:
                print("\n\tOops! Couldn't copy file metadata", type(e), e)
        patches += 1
        print("ok")
    print("patched", patches, "files")
def inifile_replace_variables(filepath, section, replacevars=dict(), appendvars=dict()):
    """
    Take a section-structured key=value based configuration file, and write
    new version with certain values replaced or appended within the section

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    returns dictionary of affected keys and their previous values

    One has to run restore_context(filepath) afterwards or security context
    of the file will not be correct after modification
    """
    pattern = re.compile('''
        (^
            \[
            (?P<section> .+) \]
            (\s+((\#|;).*)?)?
        $)|(^
            \s*
            (?P<option> [^\#;]+?)
            (\s*=\s*)
            (?P<value> .+?)?
            (\s*((\#|;).*)?)?
        $)''', re.VERBOSE)

    def add_options(config, replacevars, appendvars, oldvars):
        # add all options from replacevars and appendvars that were not found in the file
        new_vars = replacevars.copy()
        new_vars.update(appendvars)
        newvars_view = set(new_vars.keys()) - set(oldvars.keys())
        append_view = (set(appendvars.keys()) - newvars_view)
        for item in newvars_view:
            config.write("%s=%s\n" % (item, new_vars[item]))
        for item in append_view:
            config.write("%s=%s\n" % (item, appendvars[item]))

    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            in_section = False
            finished = False
            line_idx = 1
            for line in f:
                line_idx = line_idx + 1
                new_line = line
                m = pattern.match(line)
                if m:
                    sect, option, value = m.group('section', 'option', 'value')
                    if in_section and sect is not None:
                        # End of the searched section, add remaining options
                        add_options(new_config, replacevars, appendvars, old_values)
                        finished = True
                    if sect is not None:
                        # New section is found, check whether it is the one we are looking for
                        in_section = (str(sect).lower() == str(section).lower())
                    if option is not None and in_section:
                        # Great, this is an option from the section we are looking for
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append a new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                new_config.write(new_line)
            # We have finished parsing the original file.
            # There are two remaining cases:
            # 1. Section we were looking for was not found, we need to add it.
            if not (in_section or finished):
                new_config.write("[%s]\n" % (section))
            # 2. The section is the last one but some options were not found, add them.
            if in_section or not finished:
                add_options(new_config, replacevars, appendvars, old_values)
        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)

    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)

    return old_values
def config_replace_variables(filepath, replacevars=dict(), appendvars=dict()):
    """
    Take a key=value based configuration file, and write new version with
    certain values replaced or appended

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    returns dictionary of affected keys and their previous values

    One has to run restore_context(filepath) afterwards or security context
    of the file will not be correct after modification
    """
    pattern = re.compile('''
        (^
            \s*
            (?P<option> [^\#;]+?)
            (\s*=\s*)
            (?P<value> .+?)?
            (\s*((\#|;).*)?)?
        $)''', re.VERBOSE)
    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            for line in f:
                new_line = line
                m = pattern.match(line)
                if m:
                    option, value = m.group('option', 'value')
                    if option is not None:
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                new_config.write(new_line)
            # Now add all options from replacevars and appendvars that were not found in the file
            new_vars = replacevars.copy()
            new_vars.update(appendvars)
            newvars_view = set(new_vars.keys()) - set(old_values.keys())
            append_view = (set(appendvars.keys()) - newvars_view)
            for item in newvars_view:
                new_config.write("%s=%s\n" % (item, new_vars[item]))
            for item in append_view:
                new_config.write("%s=%s\n" % (item, appendvars[item]))
        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)

    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)

    return old_values
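# Usage sketch (added for illustration, not from the original source): given a
# hypothetical key=value file /etc/example/sysconfig, replace one key and
# append to another, collecting the previous values:
#
#     old = config_replace_variables(
#         "/etc/example/sysconfig",
#         replacevars={"ENABLED": "yes"},
#         appendvars={"OPTIONS": "--verbose"},
#     )
#     # old maps each touched key to its prior value, e.g. {"ENABLED": "no"}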
def write_safe(filename, func, mode='wb', prefix='tmp', subdir=None,
               permission=None, owner=None, utimes=None, fsync=False):
    """Safely write file.

    :param filename: full path of file
    :param func: what to do with the file descriptor, signature func(fd)
    :param mode: same as tempfile.NamedTemporaryFile
    :param prefix: same as tempfile.NamedTemporaryFile
    :param permission: file permission
    :param owner: file owner (uid, gid) tuple
    :param subdir: create tempdir in subdir
    :param utimes: If utimes is not None, it must be a tuple (atime, mtime);
        atime and mtime should be expressed as float seconds since the epoch.
    :param ``bool`` fsync:
        If True, call fsync to ensure all data is written to disk.
    """
    dirname = os.path.dirname(filename)
    if subdir:
        dirname = os.path.join(dirname, subdir)

    try:
        os.makedirs(dirname)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise

    try:
        tmpfile = None
        with tempfile.NamedTemporaryFile(dir=dirname,
                                         delete=False,
                                         prefix=prefix,
                                         mode=mode) as tmpfile:
            func(tmpfile)
            if permission is not None and os.name == 'posix':
                os.fchmod(tmpfile.fileno(), permission)

            if owner:
                uid, gid = owner
                os.fchown(tmpfile.fileno(), uid, gid)

            if fsync:
                tmpfile.flush()  # Flush buffer then fsync, per docs.
                os.fsync(tmpfile.fileno())

            if utimes is not None:
                if not fsync:
                    tmpfile.flush()  # Force a flush now.
                os.utime(tmpfile.name, times=utimes)

        replace(tmpfile.name, filename)
    finally:
        if tmpfile is not None:
            rm_safe(tmpfile.name)
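# Usage sketch (illustrative, not from the original source): atomically write
# a small file with 0o600 permissions and an fsync before the final rename,
# assuming the write_safe() helper above is importable as shown:
#
#     write_safe(
#         "/tmp/example-settings.yml",
#         lambda f: f.write(b"key: value\n"),
#         mode="wb",
#         permission=0o600,
#         fsync=True,
#     )
#
# The callable receives the open temporary file; the rename into place only
# happens after the data is written (and optionally fsynced), so readers never
# observe a partially written file.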
# The os.fchown() method changes the ownership of a file: it sets the user ID
# and group ID of the file referred to by the file descriptor fd.
# Available on Unix.
# Syntax
# The fchown() method has the following signature:
#     os.fchown(fd, uid, gid)
# Parameters
#     fd  -- file descriptor
#     uid -- user ID of the file owner
#     gid -- group ID of the file owner
import os, stat

# Open "/tmp"
fd = os.open("/tmp", os.O_RDONLY)

# Set the file's user ID to 100
os.fchown(fd, 100, -1)

# Set the file's group ID to 50
os.fchown(fd, -1, 50)

print("Ownership changed successfully!")

# Close the file
os.close(fd)

# The os.fdatasync() method forces a write of the file to disk; the file is
# identified by the file descriptor fd. It does not force an update of the
# file's status information (metadata). Use it when you need to flush the
# write buffers.
# Available on Unix.
# Syntax
# The fdatasync() method has the following signature:
#     os.fdatasync(fd)
# Parameters
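# Illustrative sketch (added, not part of the original tutorial): fdatasync is
# typically called right after writing through a file descriptor, e.g.
#
#     fd = os.open("/tmp/example.log", os.O_WRONLY | os.O_CREAT, 0o644)
#     os.write(fd, b"record\n")
#     os.fdatasync(fd)   # data is on disk; metadata (e.g. mtime) may lag
#     os.close(fd)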
                     stderr=subprocess.PIPE)
o, e = p.communicate()
django_pass = o.strip()

# Create the Django.my.cnf file
django_conf_file = "/opt/stack/etc/django.my.cnf"
f = open(django_conf_file, 'w+')
f.write("""[client]
user = django
port = 40000
socket = /var/run/mysql/mysql.sock
password = %s
""" % django_pass)
# Set owner and group to root:apache
apache_gid = grp.getgrnam('apache')[2]
os.fchown(f.fileno(), 0, apache_gid)
f.close()

# Password access for django
cmd_set.append('create user "django"@"localhost" identified by "%s"' % django_pass)

# Create the Django Database
cmd_set.append('create database django')

# Grant django user access to the Django database
cmd_set.append('grant all on django.* to "django"@"localhost";')

for cmd in cmd_set:
    try:
        db.execute(cmd)
    for host in hosts:
        host = fsq_path.valid_name(host)
        # uid_gid makes calls to the pw db and|or gr db, in addition to
        # potentially stat'ing, as such, we want to avoid calling it
        # unless we absolutely have to
        uid, gid = uid_gid(user, group)
        tmp_full, tmp_queue = _tmp_trg(host, host_path)
        try:
            # open once to cut down on stat/open for chown/chmod combo
            fd = os.open(tmp_full, os.O_RDONLY)
            try:
                # always fchmod here as mkdtemp is different than normal
                # mkdir
                os.fchmod(fd, mode)
                if -1 != uid or -1 != gid:
                    os.fchown(fd, uid, gid)
            finally:
                os.close(fd)
            # bless our queue with its children
            _instdir(fsq_path.tmp(trg_queue, tmp_queue), mode, uid, gid)
            _instdir(fsq_path.queue(trg_queue, tmp_queue), mode, uid, gid)
            _instdir(fsq_path.done(trg_queue, tmp_queue), mode, uid, gid)
            _instdir(fsq_path.fail(trg_queue, tmp_queue), mode, uid, gid)
            # down via configure.down if necessary
            if is_down:
                down_host(tmp_queue, host, user=item_user, group=item_group,
                          mode=item_mode)
            # atomic commit -- by rename