def removeLinuxEnvironment(scope, name):
    """Remove every line mentioning *name* from the relevant bash startup files.

    scope must be 'user' or 'system' (only asserted; both are treated the
    same on Linux).  When running as root the files of ``os_user_name`` are
    edited; otherwise the invoking user's own files are edited and then
    re-sourced via ``syscommand``.
    """
    assert scope in ('user', 'system')
    files = []
    if os.geteuid() == 0:
        if len(os_user_name) > 0:
            # BUG FIX: the original called pwd.getpwnam(username), but no
            # `username` exists in this scope -- the guard tests
            # os_user_name, so that is the account whose files we edit.
            # Also dropped the duplicated ".bash_profile" entry, which made
            # the same file be rewritten twice.
            home = pwd.getpwnam(os_user_name).pw_dir
            files.append("%s/.bashrc" % home)
            files.append("%s/.bash_profile" % home)
    else:
        files.extend(["~/.bashrc", "~/.bash_profile"])
    for file in files:
        path = os.path.expanduser(file)
        bodys = []
        #INFO_MSG("find %s: %s" % (file, name))
        f = open(path)
        for x in f.readlines():
            if name in x:
                INFO_MSG("remove %s: %s" % (file, x))
                continue
            bodys.append(x)
        f.close()
        f = open(path, "w")
        f.writelines(bodys)
        f.close()
        if os.geteuid() != 0:
            # Non-root: re-source so the removal takes effect in new shells.
            syscommand('bash -c \'source %s\'' % file, False)
def check_requirements(self):
    """Evaluate this check's declared requirements.

    Sets ``self.supported`` as a side effect and returns it.  An
    unsupported OS short-circuits immediately; root requirements
    (Linux/Solaris) and the physical-hardware requirement are accumulated
    and judged together.
    """
    # Unsupported operating system: nothing else matters.
    if SysAttribute.get_os() not in self.req_supported_os:
        self.supported = False
        return False
    results = [True]  # the OS requirement just passed
    wants_root = ((SysAttribute.is_linux() and self.req_needs_root_linux)
                  or (SysAttribute.is_solaris() and self.req_needs_root_solaris))
    if wants_root:
        results.append(os.geteuid() == 0)
    if self.req_physical_hardware:
        results.append(not self.is_virtual())
    self.supported = all(results)
    return self.supported
def create_uninstaller(self):
    """Write the ``calibre-uninstall`` script next to the installed binaries.

    Falls back to ``sys.frozen_path`` when the staging bindir is not
    writable.  Failures are fatal only when ``self.opts.fatal_errors``
    is set; otherwise they are reported via ``self.task_failed``.
    """
    base = self.opts.staging_bindir
    if not os.access(base, os.W_OK) and getattr(sys, "frozen_path", False):
        base = sys.frozen_path
    dest = os.path.join(base, "calibre-uninstall")
    self.info("Creating un-installer:", dest)
    raw = UNINSTALL.format(
        python="/usr/bin/python",
        euid=os.geteuid(),
        manifest=self.manifest,
        icon_resources=self.icon_resources,
        mime_resources=self.mime_resources,
        menu_resources=self.menu_resources,
        appdata_resources=self.appdata_resources,
        frozen_path=getattr(sys, "frozen_path", None),
    )
    try:
        with open(dest, "wb") as f:
            f.write(raw)
        # Owner rwx, everyone else read-only.
        os.chmod(dest, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
        if os.geteuid() == 0:
            # System-wide install: let root own the uninstaller.
            os.chown(dest, 0, 0)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        if self.opts.fatal_errors:
            raise
        self.task_failed("Creating uninstaller failed")
def setEnvironment(scope, name, value): assert scope in ('user', 'system') #INFO_MSG('set environment: name=%s, value=%s' % (name, value)) if platform.system() == 'Windows': root, subkey = getWindowsEnvironmentKey(scope) # Note: for 'system' scope, you must run this as Administrator key = winreg.OpenKey(root, subkey, 0, winreg.KEY_ALL_ACCESS) winreg.SetValueEx(key, name, 0, winreg.REG_EXPAND_SZ, value) winreg.CloseKey(key) else: if name.lower() == 'uid': uid, username = value if uid != str(os.geteuid()): ret, cret = syscommand('bash -c \'usermod -d /home/%s/ -u %s %s\'' % (pwd.getpwnam(username).pw_dir, uid, username), True) INFO_MSG(ret) INFO_MSG(cret) return userhome = "~" if len(os_user_name) > 0: userhome = pwd.getpwnam(os_user_name).pw_dir f = open('%s/.bash_profile' % userhome, 'a') f.write("export %s=%s\n\n" % (name, value)) f.close() if os.geteuid() > 0: syscommand('bash -c \'source %s/.bash_profile\'' % userhome, False)
def get_uid_gid(username):
    """Resolve *username* (login name or numeric uid string) to (uid, gid).

    Returns (None, None) whenever no user change should be attempted:
    empty username, unknown user, or already running as the target user.
    Diagnostics are appended to the module-level ``logs`` list.
    """
    # Get the uid/gid from username to drop into post-startup
    if not username:
        return None, None
    try:
        pwrec = pwd.getpwnam(username)
    except KeyError:
        # Not a login name; perhaps a numeric uid was configured.
        try:
            pwrec = pwd.getpwuid(int(username))
        except (KeyError, ValueError):
            logs.append((logging.WARNING,
                         'Invalid or unknown daemon.effective_user: `%r`',
                         username))
            return None, None
    if os.geteuid() == 0:
        logs.append((logging.INFO,
                     'Found daemon.effective_user: %s (uid=%d).',
                     pwrec[0], pwrec[2]))
        return pwrec[2], pwrec[3]
    if os.geteuid() == pwrec[2]:
        logs.append((logging.INFO,
                     'Already running as daemon.effective_user: %s (uid=%d).',
                     pwrec[0], pwrec[2]))
        return None, None
    logs.append((logging.WARNING,
                 'Process owner is not root; daemon.effective_user ignored.'))
    # BUG FIX: the original fell off the end here, returning a bare None
    # instead of the (None, None) pair every other path produces.
    return None, None
def test_mode(self):
    """ensure_dirs honours an explicit mode and relaxes it on a later call."""
    target = pjoin(self.dir, 'mode', 'mode')
    assert osutils.ensure_dirs(target, mode=0o700)
    self.check_dir(target, os.geteuid(), os.getegid(), 0o700)
    # Calling again without an explicit mode unrestricts the directory.
    osutils.ensure_dirs(target)
    self.check_dir(target, os.geteuid(), os.getegid(), 0o777)
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If UID and GID is specified, the real user/group is changed.

    If only UID is specified, the real user is changed,
    and the group is changed to the users primary group.

    If only GID is specified, only the group is changed.
    """
    if sys.platform == 'win32':
        # No POSIX uid/gid semantics on Windows.
        return
    if os.geteuid():
        # no point trying to setuid unless we're root.
        if not os.getuid():
            # Real uid is root but effective uid is not: a half-dropped
            # state we refuse to operate in.
            raise SecurityError('contact support')
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)
    if uid:
        _setuid(uid, gid)
    else:
        gid and setgid(gid)
    # Paranoia: verify the requested drop actually took effect.
    if uid and not os.getuid() and not os.geteuid():
        raise SecurityError('Still root uid after drop privileges!')
    if gid and not os.getgid() and not os.getegid():
        raise SecurityError('Still root gid after drop privileges!')
def drop_privileges(user: pwd.struct_passwd, group: grp.struct_group, permanent: bool = True): """ Drop root privileges and change to something more safe. :param user: The tuple with user info :param group: The tuple with group info :param permanent: Whether we want to drop just the euid (temporary), or all uids (permanent) """ # Restore euid=0 if we have previously changed it if os.geteuid() != 0 and os.getuid() == 0: restore_privileges() if os.geteuid() != 0: raise RuntimeError("Not running as root: cannot change uid/gid to {}/{}".format(user.pw_name, group.gr_name)) # Remove group privileges os.setgroups([]) if permanent: os.setgid(group.gr_gid) os.setuid(user.pw_uid) else: os.setegid(group.gr_gid) os.seteuid(user.pw_uid) # Ensure a very conservative umask os.umask(0o077) if permanent: logger.debug("Permanently dropped privileges to {}/{}".format(user.pw_name, group.gr_name)) else: logger.debug("Dropped privileges to {}/{}".format(user.pw_name, group.gr_name))
def run_as_user(self, user_id, script_file):
    """Restarts this process with the same arguments as the specified user.

    This re-runs the entire Python script so that it executes as the
    specified user.  It also adds the '--no-change-user' option to the
    re-executed command line so that the next process knows it was the
    result of a restart and does not attempt to change user again.

    @param user_id: The user id to run as, typically 0 for root.
    @param script_file: The path to the Python script file that was executed.

    @type user_id: int
    @type script_file: str
    """
    user_name = pwd.getpwuid(user_id).pw_name
    if os.geteuid() != user_id:
        if os.geteuid() != 0:
            # Only root may switch to another account; fail loudly.
            print >>sys.stderr, ('Failing, cannot start scalyr_agent as correct user. The current user (%s) does '
                                 'not own the config file and cannot change to that user because '
                                 'not root.' % user_name)
            sys.exit(1)
        # Use sudo to re-execute this script with the correct user. We also pass in --no-change-user to prevent
        # us from re-executing the script again to change the user, to
        # head of any potential bugs that could cause infinite loops.
        arguments = ['sudo', '-u', user_name, sys.executable, script_file, '--no-change-user'] + sys.argv[1:]
        print >>sys.stderr, ('Running as %s' % user_name)
        os.execvp("sudo", arguments)
def drop_root(uid):
    """Permanently give up root by switching real and effective UID to *uid*.

    When the process did not start as root (effective UID non-zero) this
    just logs and returns without touching anything.
    """
    if os.geteuid() == 0:
        os.setuid(uid)
        os.seteuid(uid)
        logging.info('UID=%d EUID=%d', os.getuid(), os.geteuid())
    else:
        logging.info('Did not start as root.')
def GetFacter(self, open_fn=open):
    """Return facter contents.

    Args:
      open_fn: func, optional, supply an open() function
    Returns:
      dict, facter contents
    """
    if self.facter_cache_path is None:
        return {}
    if not os.path.isfile(self.facter_cache_path):
        logging.info('GetFacter: facter cache file does not exist.')
        return {}
    facter = {}
    use_facter_cache = False
    try:
        st = os.stat(self.facter_cache_path)
        # if we are root, and the writer of the cache was not root, OR
        # if we are not root, the cache was not written by root, and
        # the cache was not written by ourselves
        if (os.geteuid() == 0 and st.st_uid != 0) or (
                os.geteuid() != 0 and st.st_uid != 0 and
                os.geteuid() != st.st_uid):
            # don't trust this file. be paranoid.
            logging.info('GetFacter: Untrusted facter cache, ignoring')
            use_facter_cache = False
        else:
            use_facter_cache = True
            cache_mtime = datetime.datetime.fromtimestamp(st.st_mtime)
            logging.debug('GetFacter: facter cache mtime is %s', cache_mtime)
    except OSError, e:  # Python 2 except syntax; this module predates Python 3.
        logging.info('GetFacter: OSError from os.stat(): %s', str(e))
        use_facter_cache = False
    # NOTE(review): the method appears truncated here -- `facter` and
    # `use_facter_cache` are computed but never consumed in the visible code.
def get_home_dir(self):
    '''Return gpg's home dir, creating it (and its parent) when missing.

    The parent is created mode 0770 and the home dir mode 0700, but only
    when the existing grandparent/parent is owned by the current effective
    user.  All failures are logged, never raised.
    '''
    # BUG FIX: use identity comparison for None (`== None` -> `is None`).
    if self.gpg_home is None:
        self.gpg_home = self.GPG_HOME_DIR
    try:
        # create gpg's parent directories, if they don't already exist
        parent_dir = os.path.dirname(self.gpg_home)
        if not os.path.exists(parent_dir):
            statinfo = os.stat(os.path.dirname(parent_dir))
            if statinfo.st_uid == os.geteuid():
                os.makedirs(parent_dir, 0o770)
                self.log_message('created parent of home dir: {}'.format(parent_dir))
            else:
                self.log_message('unable to create parent of home dir as {}: {}'.format(os.geteuid(), parent_dir))
        # create gpg's home directory, if it doesn't exist already
        if not os.path.exists(self.gpg_home):
            statinfo = os.stat(os.path.dirname(self.gpg_home))
            if statinfo.st_uid == os.geteuid():
                os.mkdir(self.gpg_home, 0o700)
                self.log_message('created home dir: {}'.format(self.gpg_home))
    except OSError:
        record_exception()
        self.log_message('EXCEPTION - see syr.exception.log for details')
    except Exception as exception:
        self.log_message(exception)
        self.log_message('EXCEPTION - see syr.exception.log for details')
        record_exception()
    return self.gpg_home
def set_permissions(self, **kwargs): '''set permission''' user = kwargs.get('user', self.settings.process_user) group = kwargs.get('group', self.settings.process_group) env_path = kwargs.get('path', self.virtualenv.dir) Colored.echo("Configuring permission on web application files", 'purple') mod_perms = [(self.settings.settings_file, 700), \ (self.media_root, 755), \ (os.path.join(self.settings.settings_dir, \ self.settings.project_name+'.db'), 700)] chown_cmd = ['sudo'] if os.geteuid() != 0 else [] chmod_cmd = ['sudo', 'chmod'] if os.geteuid() != 0 else [] chown_cmd.extend(['chown', '-R', '%s:%s'%(user, group), env_path]) try: subprocess.check_call(chown_cmd) for i in self.settings.apache_protected_locations: mod_perms.append((os.path.join(self.media_root, i), 700)) for i in mod_perms: path, perm = i if os.path.exists(path): subprocess.check_call(chmod_cmd+[str(perm), path]) except subprocess.CalledProcessError as _error: print _error
def run(self):
    # Install data files.  Each entry of self.data_files is a tuple:
    # ({'path':..., 'mode':..., 'owner':...}, [(file, mode), ...]).
    # Ownership is applied only when running as root.
    self.mkpath(self.install_dir)
    for f in self.data_files:
        # it's a tuple with dict to install to and a list of files
        tdict = f[0]
        dir = convert_path(tdict['path'])
        if not os.path.isabs(dir):
            dir = os.path.join(self.install_dir, dir)
        elif self.root:
            # Absolute target plus an install root: re-root it.
            dir = change_root(self.root, dir)
        self.mkpath(dir)
        os.chmod(dir, tdict['mode'])
        if(os.geteuid() == 0):
            try:
                uinfo = pwd.getpwnam(tdict['owner'])
            except KeyError:
                print "Error: Unkown user %s" % tdict['owner']
                sys.exit(1)
            uid, gid = uinfo[2], uinfo[3]
            os.chown(dir, uid, gid)
        if f[1] == []:
            # If there are no files listed, the user must be
            # trying to create an empty directory, so add the
            # directory to the list of output files.
            self.outfiles.append(dir)
        else:
            # Copy files, adding them to the list of output files.
            for data, mode in f[1]:
                data = convert_path(data)
                (out, _) = self.copy_file(data, dir)
                self.outfiles.append(out)
                os.chmod(out, mode)
                if(os.geteuid() == 0):
                    # uid/gid were resolved above (same euid==0 condition).
                    os.chown(out, uid, gid)
    self.run_command('install_locales')
def main(cli_args=sys.argv[1:]):
    """Command line argument parsing and main script execution."""
    # Until args are parsed we cannot attach them to the exception hook.
    sys.excepthook = functools.partial(_handle_exception, args=None)

    # note: arg parser internally handles --help (and exits afterwards)
    plugins = plugins_disco.PluginsRegistry.find_all()
    parser, tweaked_cli_args = create_parser(plugins, cli_args)
    args = parser.parse_args(tweaked_cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)

    # Setup logging ASAP, otherwise "No handlers could be found for
    # logger ..." TODO: this should be done before plugins discovery
    for directory in config.config_dir, config.work_dir:
        le_util.make_or_verify_dir(
            directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
            "--strict-permissions" in cli_args)
    # TODO: logs might contain sensitive data such as contents of the
    # private key! #525
    le_util.make_or_verify_dir(
        args.logs_dir, 0o700, os.geteuid(),
        "--strict-permissions" in cli_args)
    setup_logging(args, _cli_log_handler, logfile='letsencrypt.log')

    # do not log `args`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)

    # args are now available: let the exception hook report them too.
    sys.excepthook = functools.partial(_handle_exception, args=args)

    # Displayer
    if args.text_mode:
        displayer = display_util.FileDisplay(sys.stdout)
    else:
        displayer = display_util.NcursesDisplay()
    zope.component.provideUtility(displayer)

    # Reporter
    report = reporter.Reporter()
    zope.component.provideUtility(report)
    atexit.register(report.atexit_print_messages)

    # TODO: remove developer EULA prompt for the launch
    if not config.eula:
        eula = pkg_resources.resource_string("letsencrypt", "EULA")
        if not zope.component.getUtility(interfaces.IDisplay).yesno(
                eula, "Agree", "Cancel"):
            raise errors.Error("Must agree to TOS")

    if not os.geteuid() == 0:
        logger.warning(
            "Root (sudo) is required to run most of letsencrypt functionality.")
        # check must be done after arg parsing as --help should work
        # w/o root; on the other hand, e.g. "letsencrypt run
        # --authenticator dns" or "letsencrypt plugins" does not
        # require root as well
        #return (
        #    "{0}Root is required to run letsencrypt. Please use sudo.{0}"
        #    .format(os.linesep))

    return args.func(args, config, plugins)
def main(argv):
    """Entry point: parse docopt arguments, build the Shellbot and run it.

    Refuses "-u" (run commands as another user) unless started as root,
    warns when running as root without "-u", optionally reads a password
    (interactively or from stdin), then starts the bot -- repeatedly when
    --loop is given.
    """
    args = docopt(__doc__, argv=argv[1:], version=__version__)
    if args["-u"] and os.geteuid() != 0:
        stderr('Must be run as root with "-u".')
        return
    if not args["-u"] and os.geteuid() == 0:
        stderr('WARNING: Running as root without "-u".')

    password = None
    if args["--password"]:
        # NOTE(review): this line was corrupted in the source
        # (`stderr("Password: "******"", flush=True)`); reconstructed as a
        # prompt printed without a trailing newline -- confirm against VCS.
        stderr("Password: ", end="", flush=True)
        use_getpass = sys.stdin.isatty() or args["--getpass"]
        password = getpass("") if use_getpass else input()
        if not use_getpass:
            stderr("Received password.")

    bot = Shellbot(
        lines=int(args["-m"]), timeout=float(args["-t"]),
        prefix=args["-p"], queries=args["--queries"],
        user=args["-u"], cwd=args["-d"])
    if args["--path"]:
        bot.runner.path += ":" + args["--path"]

    start(bot, args, password)
    while args["--loop"]:
        start(bot, args, password)
def main(cli_args=sys.argv[1:]):
    """Command line argument parsing and main script execution."""
    # note: arg parser internally handles --help (and exits afterwards)
    plugins = plugins_disco.PluginsRegistry.find_all()
    args = create_parser(plugins, cli_args).parse_args(cli_args)
    config = configuration.NamespaceConfig(args)

    # Setup logging ASAP, otherwise "No handlers could be found for
    # logger ..." TODO: this should be done before plugins discovery
    for directory in config.config_dir, config.work_dir:
        le_util.make_or_verify_dir(directory, constants.CONFIG_DIRS_MODE,
                                   os.geteuid())
    # TODO: logs might contain sensitive data such as contents of the
    # private key! #525
    le_util.make_or_verify_dir(args.logs_dir, 0o700, os.geteuid())
    _setup_logging(args)

    def handle_exception_common():
        """Logs the exception and reraises it if in debug mode."""
        logger.debug("Exiting abnormally", exc_info=True)
        if args.debug:
            raise

    try:
        return main2(cli_args, args, config, plugins)
    except errors.Error as error:
        handle_exception_common()
        return error
    except Exception:  # pylint: disable=broad-except
        handle_exception_common()
        # BUG FIX: `.format()` previously bound only to the second adjacent
        # string literal (which has no placeholder), so callers saw a
        # literal "{0}".  Parenthesising applies it to the whole message.
        # Also fixed the "occured" typo.
        return ("An unexpected error occurred. Please see the logfiles in "
                "{0} for more details.").format(args.logs_dir)
def setupLogs(options):
    """Configure the root logger from *options* and dump runtime context.

    A stream handler is attached when ``options.trace`` is set and a file
    handler when ``options.logfile`` is given; both honour
    ``options.loglevel``.  Returns the root logger.
    """
    root = logging.getLogger()
    if options.trace or options.logfile:
        level = getattr(logging, options.loglevel.upper())
        fmt = logging.Formatter(
            '%(asctime)s %(filename)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S')
        root.setLevel(level)
        if options.trace:
            handler = logging.StreamHandler()
            handler.setLevel(level)
            handler.setFormatter(fmt)
            logging.getLogger('').addHandler(handler)
        if options.logfile:
            handler = logging.FileHandler(options.logfile)
            handler.setLevel(level)
            handler.setFormatter(fmt)
            logging.getLogger('').addHandler(handler)
    # Record the configuration and process identity for later debugging.
    for label, value in (('workdir', options.workdir),
                         ('oedir', options.oedir),
                         ('svnloc', options.svnloc),
                         ('attemptsdir', options.attemptsdir)):
        root.debug('{} = {}'.format(label, value))
    root.debug('uid = {} = {}'.format(os.getuid(), pwd.getpwuid(os.getuid()).pw_name))
    root.debug('euid = {} = {}'.format(os.geteuid(), pwd.getpwuid(os.geteuid()).pw_name))
    root.debug('gid = {} = {}'.format(os.getgid(), grp.getgrgid(os.getgid()).gr_name))
    root.debug('egid = {} = {}'.format(os.getegid(), grp.getgrgid(os.getegid()).gr_name))
    return root
def checkKeyPerm(keyfile):
    """Return True when *keyfile* exists, belongs to us and is at most rw-------.

    Any failing check logs the reason and returns False so the caller can
    drop the key from the keyring.
    """
    try:
        if not os.path.exists(keyfile):
            log.warn("launcher %s: the ssh key file %s does not exists" % (LauncherConfig().name, keyfile))
            return False
        stats = os.stat(keyfile)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; keep the best-effort handling for real errors.
        log.warn("launcher %s: something goes wrong while performing stat() on %s !" % (LauncherConfig().name, keyfile))
        return False
    if stats.st_uid != os.getuid():
        log.debug(
            "launcher %s: ssh key '%s' owner is %s, my uid is %s, dropping the key from the keyring"
            % (LauncherConfig().name, keyfile, stats.st_uid, os.getuid())
        )
        return False
    if stats.st_uid != os.geteuid():
        log.debug(
            "launcher %s: ssh key '%s' owner is %s, my euid is %s, dropping the key from the keyring"
            % (LauncherConfig().name, keyfile, stats.st_uid, os.geteuid())
        )
        return False
    # Reject any permission bits beyond owner read/write (0600).
    if stat.S_IMODE(os.stat(keyfile).st_mode) & ~(stat.S_IRUSR | stat.S_IWUSR):
        # check if perm are at most rw-------
        log.debug(
            "launcher %s: ssh key '%s' perms are not *exactly* rw-------, dropping the key from the keyring"
            % (LauncherConfig().name, keyfile)
        )
        return False
    return True
def set_euid():
    """
    Set settings.DROPLET_USER effective UID for the current process

    This adds some security, but nothing magic, an attacker can still
    gain root access, but at least we only elevate privileges when needed

    See root context manager
    """
    current = os.geteuid()
    logger.debug("Current EUID is %s" % current)
    if settings.DROPLET_USER is None:
        logger.info("Not changing EUID, DROPLET_USER is None")
        return
    uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
    if current != uid:
        try:
            os.seteuid(uid)
            logger.info("Set EUID to %s (%s)" % (settings.DROPLET_USER, os.geteuid()))
        except OSError:
            # BUG FIX: was a bare `except:`; os.seteuid() failures surface
            # as OSError (PermissionError), and the bare clause also
            # swallowed SystemExit/KeyboardInterrupt.
            current_user = pwd.getpwuid(os.getuid()).pw_name
            logger.error("Failed to set '%s' EUID, running as '%s'" % (settings.DROPLET_USER, current_user))
    else:
        logger.debug("Didn't set EUID, it was already correct")
def access(filename, mode):
    # Pure-Python emulation of os.access() built from stat permission bits.
    # Checks other/group/user read, write and execute bits against the
    # process's real gid/uid, plus set-id bits against the effective ids.
    # NOTE(review): the S_ISUID/S_ISGID clauses grant access when a set-id
    # bit is present and the *effective* id matches the file owner/group --
    # that is not how kernel access checks treat setuid bits; preserved
    # as-is.  Each xOK value may be a non-zero int rather than True.
    if mode == os.F_OK:
        return exists(filename)
    st = stat(filename)
    filemode = st.st_mode
    uid = st.st_uid
    gid = st.st_gid
    if mode & os.R_OK:
        rOK = ( filemode & statconsts.S_IROTH ) or \
              ( filemode & statconsts.S_IRGRP and os.getgid() == gid ) or \
              ( filemode & statconsts.S_IRUSR and os.getuid() == uid ) or \
              ( filemode & statconsts.S_ISGID and os.getegid() == gid ) or \
              ( filemode & statconsts.S_ISUID and os.geteuid() == uid )
    else:
        rOK = True
    if mode & os.W_OK:
        wOK = ( filemode & statconsts.S_IWOTH ) or \
              ( filemode & statconsts.S_IWGRP and os.getgid() == gid ) or \
              ( filemode & statconsts.S_IWUSR and os.getuid() == uid ) or \
              ( filemode & statconsts.S_ISGID and os.getegid() == gid ) or \
              ( filemode & statconsts.S_ISUID and os.geteuid() == uid )
    else:
        wOK = True
    if mode & os.X_OK:
        xOK = ( filemode & statconsts.S_IXOTH ) or \
              ( filemode & statconsts.S_IXGRP and os.getgid() == gid ) or \
              ( filemode & statconsts.S_IXUSR and os.getuid() == uid ) or \
              ( filemode & statconsts.S_ISGID and os.getegid() == gid ) or \
              ( filemode & statconsts.S_ISUID and os.geteuid() == uid )
    else:
        xOK = True
    # `and` chains propagate the last truthy/falsy operand, so the return
    # value is truthy/falsy as os.access would be, but not necessarily bool.
    return rOK and wOK and xOK
def test_executeAsUser_Unix(self):
    """
    Test executing as a different user.
    """
    initial_uid, initial_gid = os.geteuid(), os.getegid()
    initial_groups = os.getgroups()
    test_user = mk.getTestUser(u'normal')
    # Sanity: we are not already in the test account's groups.
    self.assertNotEqual(
        sorted(self.getGroupsIDForTestAccount()),
        sorted(os.getgroups()),
        )
    with system_users.executeAsUser(username=test_user.name):
        import pwd
        import grp
        uid, gid = os.geteuid(), os.getegid()
        # Python 2: pwd/grp return byte strings that need decoding.
        impersonated_username = pwd.getpwuid(uid)[0].decode('utf-8')
        impersonated_groupname = grp.getgrgid(gid)[0].decode('utf-8')
        impersonated_groups = os.getgroups()
        self.assertEqual(test_user.name, impersonated_username)
        self.assertEqual(TEST_ACCOUNT_GROUP, impersonated_groupname)
        self.assertNotEqual(initial_uid, uid)
        self.assertNotEqual(initial_gid, gid)
        self.assertNotEqual(initial_groups, impersonated_groups)
        if self.os_name != 'osx':
            # On OSX newer than 10.5 get/set groups are useless.
            self.assertEqual(
                sorted(self.getGroupsIDForTestAccount()),
                sorted(impersonated_groups),
                )
    # Leaving the context manager must restore the original identity.
    self.assertEqual(initial_uid, os.geteuid())
    self.assertEqual(initial_gid, os.getegid())
    self.assertEqual(initial_groups, os.getgroups())
def main(args=None):
    '''Control a mounted S3QL File System.'''
    if args is None:
        args = sys.argv[1:]
    options = parse_args(args)
    setup_logging(options)
    path = options.mountpoint
    if not os.path.exists(path):
        raise QuietError('Mountpoint %r does not exist' % path)
    ctrlfile = os.path.join(path, CTRL_NAME)
    # NOTE(review): this looks inverted but appears deliberate -- the S3QL
    # file system presumably hides the control file from readdir, so a
    # valid mountpoint has CTRL_NAME absent from listdir() yet reachable
    # via stat(); confirm against the fs implementation.
    if not (CTRL_NAME not in llfuse.listdir(path) and os.path.exists(ctrlfile)):
        raise QuietError('Mountpoint is not an S3QL file system')
    # Only root or the user who mounted the fs may send control commands.
    if os.stat(ctrlfile).st_uid != os.geteuid() and os.geteuid() != 0:
        raise QuietError('Only root and the mounting user may run s3qlctrl.')
    # Commands are delivered to the file system via xattrs on the
    # control file.
    if options.action == 'flushcache':
        llfuse.setxattr(ctrlfile, 's3ql_flushcache!', 'dummy')
    if options.action == 'upload-meta':
        llfuse.setxattr(ctrlfile, 'upload-meta', 'dummy')
    elif options.action == 'log':
        llfuse.setxattr(ctrlfile, 'logging',
                        pickle.dumps((options.level, options.modules),
                                     pickle.HIGHEST_PROTOCOL))
    elif options.action == 'cachesize':
        llfuse.setxattr(ctrlfile, 'cachesize',
                        pickle.dumps(options.cachesize*1024))
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Add a key to the authorized_keys file. The "key" parameter must only be
    the string of text that is the encoded key. If the key begins with
    "ssh-rsa" or ends with user@host, remove those from the key before
    passing it to this function.

    Returns one of: 'invalid', 'fail', 'replace', 'no change', 'new'.

    CLI Example::

        salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
    '''
    # A bare key contains no whitespace; anything else still carries its
    # "ssh-rsa ... user@host" decoration.
    if len(key.split()) > 1:
        return 'invalid'
    enc = _refine_enc(enc)
    uinfo = __salt__['user.info'](user)
    if not uinfo:
        return 'fail'
    status = check_key(user, key, enc, comment, options, config)
    if status == 'update':
        _replace_auth_key(user, key, enc, comment, options or [], config)
        return 'replace'
    elif status == 'exists':
        return 'no change'
    else:
        auth_line = _format_auth_line(key, enc, comment, options)
        if not os.path.isdir(uinfo.get('home', '')):
            return 'fail'
        fconfig = os.path.join(uinfo['home'], config)
        if not os.path.isdir(os.path.dirname(fconfig)):
            dpath = os.path.dirname(fconfig)
            os.makedirs(dpath)
            # Only root can hand ownership back to the target user.
            if os.geteuid() == 0:
                os.chown(dpath, uinfo['uid'], uinfo['gid'])
            os.chmod(dpath, 448)  # decimal 448 == 0o700
        if not os.path.isfile(fconfig):
            new_file = True
        else:
            new_file = False
        try:
            with salt.utils.fopen(fconfig, 'a+') as _fh:
                _fh.write('{0}'.format(auth_line))
        except (IOError, OSError) as exc:
            msg = 'Could not write to key file: {0}'
            raise CommandExecutionError(msg.format(str(exc)))
        if new_file:
            if os.geteuid() == 0:
                os.chown(fconfig, uinfo['uid'], uinfo['gid'])
            os.chmod(fconfig, 384)  # decimal 384 == 0o600
        return 'new'
def getUserName(UID=None):
    """Return the login name for *UID*, defaulting to the current effective UID.

    Note: UID=0 (root) is falsy and is therefore also replaced by the
    current effective UID -- preserved from the original contract.
    """
    if not UID:
        UID = os.geteuid()
    # BUG FIX: the original ignored the resolved UID and always looked up
    # os.geteuid(), so explicit arguments had no effect.
    return pwd.getpwuid(UID).pw_name
def test_get_ssh_dir(self):
    """get_ssh_dir returns the caller's ~/.ssh, or None when it is missing."""
    config = {}
    pw_entry = getpwuid(os.geteuid())
    expected = os.path.join(pw_entry.pw_dir, '.ssh')
    if not os.path.isdir(expected):
        expected = None
    result = get_ssh_dir(config, pw_entry.pw_name)
    self.assertEqual(result, expected)
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Add a key to the authorized_keys file. The "key" parameter must only be
    the string of text that is the encoded key. If the key begins with
    "ssh-rsa" or ends with user@host, remove those from the key before
    passing it to this function.

    Returns one of: 'invalid', 'fail', 'replace', 'no change', 'new'.

    CLI Example::

        salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
    '''
    # BUG FIX: `options=[]` was a shared mutable default argument.
    if options is None:
        options = []
    # A bare key contains no whitespace.
    if len(key.split()) > 1:
        return 'invalid'
    enc = _refine_enc(enc)
    uinfo = __salt__['user.info'](user)
    status = check_key(user, key, enc, comment, options, config)
    if status == 'update':
        _replace_auth_key(user, key, enc, comment, options, config)
        return 'replace'
    elif status == 'exists':
        return 'no change'
    auth_line = _format_auth_line(key, enc, comment, options)
    if not os.path.isdir(uinfo['home']):
        return 'fail'
    fconfig = os.path.join(uinfo['home'], config)
    dpath = os.path.dirname(fconfig)
    if not os.path.isdir(dpath):
        os.makedirs(dpath)
        # Only root can hand ownership back to the target user.
        if os.geteuid() == 0:
            os.chown(dpath, uinfo['uid'], uinfo['gid'])
        os.chmod(dpath, 448)  # decimal 448 == 0o700
    # The file write was duplicated across both branches; merge it and
    # remember whether we created the file so ownership can be fixed.
    new_file = not os.path.isfile(fconfig)
    open(fconfig, 'a+').write('{0}'.format(auth_line))
    if new_file:
        if os.geteuid() == 0:
            os.chown(fconfig, uinfo['uid'], uinfo['gid'])
        os.chmod(fconfig, 384)  # decimal 384 == 0o600
    return 'new'
def format():
    """Render an HTML summary of the running interpreter's environment."""
    euid = os.geteuid()
    # NOTE: shadows the builtin `format`; kept for interface compatibility.
    parts = [
        'Effective UID: ' + str(euid) + " (" + pwd.getpwuid(euid)[0] + ")" + '<br>',
        '__file__: ' + __file__,
        '<h1>Python Info</h1>\n',
        format_version(),
        format_python_path(),
        format_environ(os.environ),
    ]
    return ''.join(parts)
def main():
    # Interactive log cleaner (Python 2).  Removes all traces of an IP
    # address or hostname from system logs, then truncates the invoking
    # user's bash history.
    banner()
    print ""
    if os.geteuid() != 0:
        print "\nNot running as root. Only logz accessible by non privileged users will be edited."
    raw_input("Press enter to get started...\n")
    validlogs()
    choice = raw_input("\nDo you want to search for more logs? [yes or no] ")
    if choice == "yes" or choice == "YES" or choice == "Yes":
        hollaback = 1
        morelogz()
    elif choice == "no" or choice == "NO" or choice == "No":
        hollaback = 2
        pass
    else:
        sys.exit("It's really not that difficult. Choose either yes or no f****r!\n")
    # These are read/written by the searchanddestroy helpers.
    global hitme
    global ipaddr
    global host
    choice1 = raw_input("\nWould you like to remove a IP address or Hostname? [ip or hostname] ")
    if choice1 == "IP" or choice1 == "ip":
        hitme = 1
        ipaddr = raw_input("\tWhat IP address would you like removed from the logs? ")
        while IPChk(ipaddr) != True:
            print "\n\tInvalid IP! Try again!"
            ipaddr = raw_input("\tWhat IP address would you like removed from the logs? ")
        else:
            # while/else: runs once the IP finally validates.
            if hollaback == 1:
                searchanddestroy()
                searchanddestroy2()
            else:
                searchanddestroy()
    elif choice1 == "HOSTNAME" or choice1 == "hostname":
        hitme = 2
        host = raw_input("\tWhat Hostname would you like removed from the logs? ")
        if hollaback == 1:
            searchanddestroy()
            searchanddestroy2()
        else:
            searchanddestroy()
    else:
        sys.exit("It's really not that difficult. Choose either ip or hostname f****r!\n")
    # Finally, wipe the bash history of root or the current user.
    if os.geteuid() == 0:
        w = open("/root/.bash_history", 'w')
        w.write("")
        w.close()
    else:
        # NOTE(review): USERNAME is not always set on Linux (LOGNAME/USER
        # are the common variables) -- confirm on target systems.
        user = os.getenv('USERNAME')
        w = open("/home/"+user+"/.bash_history", 'w')
        w.write("")
        w.close()
def __init__(self):
    """Daemon-runner configuration: detached std streams plus per-euid
    pidfile and logfile under /tmp."""
    self.app_name = 'srvApp'
    devnull = '/dev/null'
    self.stdin_path = devnull
    self.stdout_path = devnull
    self.stderr_path = devnull
    euid = os.geteuid()
    # Per-user paths so several accounts can run their own instance.
    self.pidfile_path = '/tmp/%s_%s.pid' % (self.app_name, euid)
    self.pidfile_timeout = 5
    self.log_file = '/tmp/%s_%s.log' % (self.app_name, euid)
    self.foreground = False
def startup_checks():
    """Parse command-line flags, enforce prerequisites and load settings.

    Exits via sys.exit() with a message on any problem; returns the parsed
    settings dict (with 'shut_down' injected) on success.
    """
    # Splash
    print(" _ _ _ _ _ \n" +
          " | | | | (_) | | \n" +
          " _ _ ___| |__ | | _ _| | | \n" +
          " | | | |/___) _ \| |_/ ) | | | \n" +
          " | |_| |___ | |_) ) _ (| | | | \n" +
          " |____/(___/|____/|_| \_)_|\_)_)\n")

    # Check arguments
    args = sys.argv[1:]

    # Check for help
    if '-h' in args or '--help' in args:
        sys.exit(help_message)
    if '--version' in args:
        print('usbkill', __version__)
        sys.exit(0)

    copy_settings = False
    if '--cs' in args:
        args.remove('--cs')
        copy_settings = True

    shut_down = True
    if '--no-shut-down' in args:
        print("[NOTICE] Ready to execute all the (potentially destructive) commands, but NOT shut down the computer.")
        args.remove('--no-shut-down')
        shut_down = False

    # Check all other args
    if len(args) > 0:
        sys.exit("\n[ERROR] Argument not understood. Can only understand -h\n")

    # Check if program is run as root, else exit.
    # Root is needed to power off the computer.
    if not os.geteuid() == 0:
        sys.exit("\n[ERROR] This program needs to run as root.\n")

    # Warn the user if he does not have FileVault
    if CURRENT_PLATFORM.startswith("DARWIN"):
        try:
            # fdesetup return exit code 0 when true and 1 when false
            subprocess.check_output(["/usr/bin/fdesetup", "isactive"])
        except subprocess.CalledProcessError:
            print("[NOTICE] FileVault is disabled. Sensitive data SHOULD be encrypted.")

    # On first time use copy usbkill.ini to /etc/usebkill.ini
    # If dev-mode, always copy and don't remove old settings
    if not os.path.isfile(SETTINGS_FILE) or copy_settings:
        sources_path = os.path.dirname(os.path.realpath(__file__))
        source_settings = os.path.join(sources_path, "install/usbkill.ini")
        if not os.path.isfile(source_settings):
            sys.exit("\n[ERROR] You have lost your settings file. Get a new copy of the usbkill.ini and place it in /etc/ or in " + sources_path + "/\n")
        print("[NOTICE] Copying install/setting.ini to " + SETTINGS_FILE)
        # BUG FIX: the original concatenated sources_path + "install/..."
        # without a path separator, so the copy source path was wrong.
        os.system("cp " + source_settings + " " + SETTINGS_FILE)

    # Load settings
    settings = load_settings(SETTINGS_FILE)
    settings['shut_down'] = shut_down

    # Make sure no spaces a present in paths to be wiped.
    for name in settings['folders_to_remove'] + settings['files_to_remove']:
        if ' ' in name:
            # BUG FIX: `msg +=` referenced an unbound variable; build the
            # message directly instead.
            sys.exit("[ERROR][WARNING] '" + name + "'as specified in your usbkill.ini contains a space.\n")

    # Make sure srm is present if it will be used.
    if settings['melt_usbkill'] or len(settings['folders_to_remove'] + settings['files_to_remove']) > 0:
        if not program_present('srm'):
            sys.exit("[ERROR] usbkill configured to destroy data, but srm not installed.\n")
        if not settings['remove_file_cmd'].startswith('srm'):
            sys.exit("[ERROR] remove_file_command should start with `srm'. srm should be used for automated data overwrite.\n")

    # Make sure sdmem is present if it will be used.
    if settings['do_wipe_ram']:
        # BUG FIX: the messages below previously named `srm` instead of the
        # tool actually being checked.
        if not program_present('sdmem'):
            sys.exit("[ERROR] usbkill configured to destroy data, but sdmem not installed.\n")
        if not settings['wipe_ram_cmd'].startswith('sdmem'):
            sys.exit("[ERROR] wipe_ram_cmd should start with `sdmem'. sdmem should be used for automated data overwrite.\n")

    # Make sure sswap is present if it will be used.
    if settings['do_wipe_swap']:
        if not program_present('sswap'):
            sys.exit("[ERROR] usbkill configured to destroy data, but sswap not installed.\n")
        if not settings['wipe_swap_cmd'].startswith('sswap'):
            sys.exit("[ERROR] wipe_swap_cmd should start with `sswap'. sswap should be used for automated data overwrite.\n")

    # Make sure there is a logging folder
    log_folder = os.path.dirname(settings['log_file'])
    if not os.path.isdir(log_folder):
        os.mkdir(log_folder)

    return settings
def _get_metadata_proxy_user_group(cls, conf):
    """Return the (user, group) pair the metadata proxy should run as.

    Falls back to the current effective uid/gid -- formatted as strings --
    when either value is not configured.
    """
    configured = (conf.metadata_proxy_user, conf.metadata_proxy_group)
    fallback = (str(os.geteuid()), str(os.getegid()))
    user, group = (c or f for c, f in zip(configured, fallback))
    return user, group
def consutil():
    """consutil - Command-line utility for interacting with switches via console device"""
    # Console device access needs root; bail out early otherwise.
    if os.geteuid() == 0:
        return
    click.echo("Root privileges are required for this operation")
    sys.exit(1)
# NOTE(review): this chunk begins mid-statement -- the enclosing function's
# definition lies outside the visible source; the call-argument tail and the
# cleanup lines below belong to it.
                   stdout=subprocess.PIPE, shell=True)
    print('Cleaning Downloads Dir')
    subprocess.run('rm -f ' + path, stdout=subprocess.PIPE, shell=True)


def get_code(name):
    """Map a JetBrains product name to its two-letter product code."""
    if name == 'phpstorm':
        code = 'PS'
    elif name == 'datagrip':
        code = 'DG'
    elif name == 'pycharm':
        code = 'PC'
    elif name == 'webstorm':
        code = 'WS'
    else:
        # Unknown product: abort ("unkown" typo preserved -- runtime string).
        exit('unkown app name')
    return code


if __name__ == '__main__':
    # Installs into system directories, so require root and an app name.
    if (os.geteuid() != 0):
        exit('Need to be root')
    if len(sys.argv) < 2:
        exit('Need to pass in app name')
    main()
def some_security(self):
    """Refuse to invoke external commands while running as root.

    Raises getmailConfigurationError when the effective UID is 0, the
    `allow_root_commands` option is disabled, and no alternate `user`
    is configured.  Otherwise does nothing.
    """
    # BUG FIX: identity comparison for None (`== None` -> `is None`).
    if (os.geteuid() == 0 and not self.conf['allow_root_commands']
            and self.conf['user'] is None):
        raise getmailConfigurationError(
            'refuse to invoke external commands as root by default')
def _is_root():
    """Return True when the effective UID is 0.

    Platforms without os.geteuid (e.g. Windows) are assumed not to be
    running as an administrator.
    """
    geteuid = getattr(os, 'geteuid', None)
    if geteuid is None:
        # assume not an admin on non-Unix platforms
        return False
    return geteuid() == 0
def _docker_run(self, docker_image_name, cmd, user=None, env=None):
    "Runs a command and returns the return code or None if it timed out."
    # Full lifecycle of one throwaway container:
    #   run (detached) -> `timeout N docker wait` -> stop -> logs -> rm.
    # Returns (return_code, output, errors); return_code is None on timeout.
    errors = []
    if user not in ('grade_oven', 'root', None):
        raise ValueError(
            'User "{}" must be "grade_oven" or "root".'.format(user))
    if env is None:
        env = {}
    # Base `docker run` invocation: resource-limited, networkless, read-only
    # container with the grading tree mounted read-write.
    docker_cmd = [
        'docker', 'run', '--hostname', 'gradeoven', '--memory',
        str(self.max_mem_bytes),
        # TODO: figure out why I need to set nproc so high
        # If I didn't set nproc > 500 docker wouldn't even start
        '--ulimit', 'nproc=1000:1000',
        '--ulimit', 'nice=19:19',
        '--ulimit',
        'nofile={}:{}'.format(self.max_num_files, self.max_num_files),
        '--name', self.container_id, '--net', 'none',
        '--read-only=true', '--restart=no', '--detach',
        '--volume', u'{}/grade_oven:/grade_oven'.format(self.host_dir),
        '--volume', u'{}/tmp:/tmp'.format(self.host_dir),
        '--workdir', '/grade_oven/submission', '--cpu-shares', '128'
    ]
    # If a user is not specified, run as the effective user of this process.
    # If this code breaks, you can use 'grade_oven' in a --prod run but not
    # a --debug run.
    docker_cmd.extend(['--user', user or str(os.geteuid())])
    for key, val in env.items():
        docker_cmd.append('--env')
        # Decode byte values (replacing undecodable bytes); text values make
        # six.text_type raise TypeError and pass through unchanged.
        try:
            val = six.text_type(val, errors='replace')
        except TypeError:
            pass
        # NOTE(review): under Python 3, .encode() makes this render as
        # "key=b'...'"; presumably written for Python 2 -- confirm.
        docker_cmd.append('{}={}'.format(key, val.encode('utf-8')))
    if user == 'root':
        # Root runs additionally get the host-side root/ dir mounted.
        docker_cmd.append('--volume')
        docker_cmd.append(u'{}/root:/root'.format(self.host_dir))
    docker_cmd.append(docker_image_name)
    docker_cmd.extend(cmd)
    logging.info('Starting Docker container: %s', docker_cmd)
    proc = subprocess.Popen(docker_cmd,
                            bufsize=-1,
                            close_fds=True,
                            cwd=self.host_dir,
                            env={})
    proc.wait()
    logging.info('Waiting for Docker container: %s', self.container_id)
    # `timeout N docker wait` bounds total runtime; on success `docker wait`
    # prints the container's exit code on stdout.
    docker_cmd = [
        'timeout', str(self.timeout_seconds), 'docker', 'wait',
        self.container_id
    ]
    proc = subprocess.Popen(docker_cmd,
                            stdout=subprocess.PIPE,
                            bufsize=-1,
                            close_fds=True,
                            cwd=self.host_dir,
                            env={})
    return_code_raw, _ = proc.communicate()
    try:
        return_code = int(return_code_raw)
    except ValueError:
        # `timeout` killed `docker wait` before an exit code was printed.
        errors.append(
            'Command "{}" did not finish in {} seconds and timed out.'.
            format(join_cmd_parts(cmd), self.timeout_seconds))
        return_code = None
    logging.info('Stopping Docker container: %s', self.container_id)
    docker_cmd = ['docker', 'stop', '--time', '5', self.container_id]
    proc = subprocess.Popen(docker_cmd,
                            bufsize=-1,
                            close_fds=True,
                            cwd=self.host_dir,
                            env={})
    proc.wait()
    logging.info('Reading Docker logs from container: %s', self.container_id)
    docker_cmd = ['docker', 'logs', self.container_id]
    proc = subprocess.Popen(docker_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            bufsize=4096,
                            close_fds=True,
                            cwd=self.host_dir,
                            env={})
    # Combined stdout/stderr, capped at 4096 bytes by the helper.
    output, err = read_proc_summarized_stdout(proc, 4096)
    if err:
        errors.append(err)
    logging.info('Removing Docker container: %s', self.container_id)
    docker_cmd = ['docker', 'rm', '--force', self.container_id]
    proc = subprocess.Popen(docker_cmd,
                            bufsize=-1,
                            close_fds=True,
                            cwd=self.host_dir,
                            env={})
    proc.wait()
    return return_code, output, errors
command = {cmd} ret = subprocess.call(command) import xml.etree.ElementTree as ElementTree, glob, re for file in glob.glob('logs/*/surefire.xml'): tree = ElementTree.parse(file) for suite in tree.findall('.//testsuite'): for test in suite.findall('testcase'): match = re.match('(init|end)_per_suite', test.attrib['name']) if match is not None: suite.remove(test) tree.write(file) sys.exit(ret) ''' command = command.format(uid=os.geteuid(), gid=os.getegid(), cmd=ct_command, shed_privileges=(platform.system() == 'Linux')) volumes = [] if os.path.isdir(expanduser('~/.docker')): volumes += [(expanduser('~/.docker'), '/tmp/docker_config', 'ro')] remove_dockers_and_volumes() ret = docker.run(tty=True, rm=True, interactive=True, workdir=os.path.join(script_dir, 'test_distributed'), volumes=volumes,
def checkPermission():
    """Configure network interfaces when running as root; otherwise complain.

    Uses the effective UID so the check honours setuid/sudo execution.
    """
    # Fixed: the original used `os.geteuid() is 0`, an identity comparison
    # that only "works" because CPython interns small ints; it emits a
    # SyntaxWarning on Python 3.8+ and is not guaranteed by the language.
    if os.geteuid() == 0:
        configureInterfaces()
    else:
        print("User must be root")
if log_level is not None: if not log.set_level(log_level): usage() log_file = os.path.normpath('/var/log/hp/hplip-uninstall.log') if os.getuid() != 0: log.error( "To run 'hp-uninstall' utility, you must have root privileges.(Try using 'sudo' or 'su -c')" ) sys.exit(1) if os.path.exists(log_file): os.remove(log_file) log.set_logfile(log_file) log.set_where(log.LOG_TO_CONSOLE_AND_FILE) log.debug("Log file=%s" % log_file) log.debug("euid = %d" % os.geteuid()) utils.log_title(__title__, __version__, True) log.info("Uninstaller log saved in: %s" % log.bold(log_file)) log.info("") core = CoreInstall(MODE_CHECK, INTERACTIVE_MODE) core.init() core.uninstall(mode)
def icmp_exfiltration_handler(url, http_request_method): # Check injection state settings.DETECTION_PHASE = True settings.EXPLOITATION_PHASE = False # You need to have root privileges to run this script if os.geteuid() != 0: err_msg = "You need to have root privileges to run this option." print(settings.print_critical_msg(err_msg) + "\n") os._exit(0) if http_request_method == "GET": #url = parameters.do_GET_check(url) request = _urllib.request.Request(url) headers.do_check(request) vuln_parameter = parameters.vuln_GET_param(url) else: parameter = menu.options.data parameter = _urllib.parse.unquote(parameter) parameter = parameters.do_POST_check(parameter) request = _urllib.request.Request(url, parameter) headers.do_check(request) vuln_parameter = parameters.vuln_POST_param(parameter, url) # Check if defined any HTTP Proxy. if menu.options.proxy: try: response = proxy.use_proxy(request) except _urllib.error.HTTPError as err_msg: if str(err_msg.code) == settings.INTERNAL_SERVER_ERROR: response = False elif settings.IGNORE_ERR_MSG == False: err = str(err_msg) + "." print("\n") + settings.print_critical_msg(err) continue_tests = checks.continue_tests(err_msg) if continue_tests == True: settings.IGNORE_ERR_MSG = True else: os._exit(0) # Check if defined Tor. elif menu.options.tor: try: response = tor.use_tor(request) except _urllib.error.HTTPError as err_msg: if str(err_msg.code) == settings.INTERNAL_SERVER_ERROR: response = False elif settings.IGNORE_ERR_MSG == False: err = str(err_msg) + "." print("\n") + settings.print_critical_msg(err) continue_tests = checks.continue_tests(err_msg) if continue_tests == True: settings.IGNORE_ERR_MSG = True else: os._exit(0) else: try: response = _urllib.request.urlopen(request, timeout=settings.TIMEOUT) except _urllib.error.HTTPError as err_msg: if str(err_msg.code) == settings.INTERNAL_SERVER_ERROR: response = False elif settings.IGNORE_ERR_MSG == False: err = str(err_msg) + "." 
print("\n") + settings.print_critical_msg(err) continue_tests = checks.continue_tests(err_msg) if continue_tests == True: settings.IGNORE_ERR_MSG = True else: os._exit(0) if settings.TARGET_OS == "win": err_msg = "This module's payloads are not suppoted by " err_msg += "the identified target operating system." print(settings.print_critical_msg(err_msg) + "\n") os._exit(0) else: technique = "ICMP exfiltration module" info_msg ="Loading the " + technique + ". \n" sys.stdout.write(settings.print_info_msg(info_msg)) sys.stdout.flush() ip_data = menu.options.ip_icmp_data # Source IP address ip_src = re.findall(r"ip_src=(.*),", ip_data) ip_src = ''.join(ip_src) # Destination IP address ip_dst = re.findall(r"ip_dst=(.*)", ip_data) ip_dst = ''.join(ip_dst) exploitation(ip_dst, ip_src, url, http_request_method, vuln_parameter, technique)
# Ejercicio 104: Obtener el ID de usuario, el ID de grupo, y grupos complementarios en Linux. import os print('El ID de usuario es: %d' % os.geteuid()) print('El ID de grupo es: %d' % os.getegid()) print('Los IDs de grupos complementarios son: {}'.format(str(os.getgroups())))
class PipStateTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): def setUp(self): super(PipStateTest, self).setUp() ret = self.run_function('cmd.has_exec', ['virtualenv']) if not ret: self.skipTest('virtualenv not installed') def test_pip_installed_errors(self): venv_dir = os.path.join(integration.SYS_TMP_DIR, 'pip-installed-errors') try: # Since we don't have the virtualenv created, pip.installed will # thrown and error. # Example error strings: # * "Error installing 'supervisor': /tmp/pip-installed-errors: not found" # * "Error installing 'supervisor': /bin/sh: 1: /tmp/pip-installed-errors: not found" # * "Error installing 'supervisor': /bin/bash: /tmp/pip-installed-errors: No such file or directory" os.environ['SHELL'] = '/bin/sh' ret = self.run_function('state.sls', mods='pip-installed-errors') self.assertSaltFalseReturn(ret) self.assertSaltCommentRegexpMatches( ret, 'Error installing \'supervisor\':(?:.*)' '/tmp/pip-installed-errors(?:.*)' '([nN]o such file or directory|not found)') # We now create the missing virtualenv ret = self.run_function('virtualenv.create', [venv_dir]) self.assertEqual(ret['retcode'], 0) # The state should not have any issues running now ret = self.run_function('state.sls', mods='pip-installed-errors') self.assertSaltTrueReturn(ret) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) def test_pip_installed_weird_install(self): ographite = '/opt/graphite' if os.path.isdir(ographite): self.skipTest( 'You already have \'{0}\'. This test would overwrite this ' 'directory'.format(ographite)) try: os.makedirs(ographite) except OSError as err: if err.errno == 13: # Permission denied self.skipTest( 'You don\'t have the required permissions to run this test' ) finally: if os.path.isdir(ographite): shutil.rmtree(ographite) venv_dir = os.path.join(integration.SYS_TMP_DIR, 'pip-installed-weird-install') try: # Since we don't have the virtualenv created, pip.installed will # thrown and error. 
ret = self.run_function('state.sls', mods='pip-installed-weird-install') self.assertSaltTrueReturn(ret) # We cannot use assertInSaltComment here because we need to skip # some of the state return parts for key in ret.keys(): self.assertTrue(ret[key]['result']) if ret[key]['comment'] == 'Created new virtualenv': continue self.assertEqual( ret[key]['comment'], 'There was no error installing package \'carbon\' ' 'although it does not show when calling \'pip.freeze\'.') finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) if os.path.isdir('/opt/graphite'): shutil.rmtree('/opt/graphite') def test_issue_2028_pip_installed_state(self): ret = self.run_function('state.sls', mods='issue-2028-pip-installed') venv_dir = os.path.join(integration.SYS_TMP_DIR, 'issue-2028-pip-installed') try: self.assertSaltTrueReturn(ret) self.assertTrue( os.path.isfile(os.path.join(venv_dir, 'bin', 'supervisord'))) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) def test_issue_2087_missing_pip(self): venv_dir = os.path.join(integration.SYS_TMP_DIR, 'issue-2087-missing-pip') try: # Let's create the testing virtualenv ret = self.run_function('virtualenv.create', [venv_dir]) self.assertEqual(ret['retcode'], 0) # Let's remove the pip binary pip_bin = os.path.join(venv_dir, 'bin', 'pip') if not os.path.isfile(pip_bin): self.skipTest( 'Failed to find the pip binary to the test virtualenv') os.remove(pip_bin) # Let's run the state which should fail because pip is missing ret = self.run_function('state.sls', mods='issue-2087-missing-pip') self.assertSaltFalseReturn(ret) self.assertInSaltComment( 'Error installing \'pep8\': Could not find a `pip` binary', ret) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) def test_issue_5940_multiple_pip_mirrors(self): ret = self.run_function('state.sls', mods='issue-5940-multiple-pip-mirrors') venv_dir = os.path.join(integration.SYS_TMP_DIR, '5940-multiple-pip-mirrors') try: self.assertSaltTrueReturn(ret) self.assertTrue( 
os.path.isfile(os.path.join(venv_dir, 'bin', 'pep8'))) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') @with_system_account('issue-6912', on_existing='delete', delete=True) def test_issue_6912_wrong_owner(self, username): venv_dir = os.path.join(integration.SYS_TMP_DIR, '6912-wrong-owner') # ----- Using runas -------------------------------------------------> venv_create = self.run_function('virtualenv.create', [venv_dir], runas=username) if venv_create['retcode'] > 0: self.skipTest( 'Failed to create testcase virtual environment: {0}'.format( ret)) # Using the package name. try: ret = self.run_state('pip.installed', name='pep8', runas=username, bin_env=venv_dir) self.assertSaltTrueReturn(ret) uinfo = pwd.getpwnam(username) for globmatch in (os.path.join(venv_dir, '**', 'pep8*'), os.path.join(venv_dir, '*', '**', 'pep8*'), os.path.join(venv_dir, '*', '*', '**', 'pep8*')): for path in glob.glob(globmatch): self.assertEqual(uinfo.pw_uid, os.stat(path).st_uid) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) # Using a requirements file venv_create = self.run_function('virtualenv.create', [venv_dir], runas=username) if venv_create['retcode'] > 0: self.skipTest( 'Failed to create testcase virtual environment: {0}'.format( ret)) req_filename = os.path.join(integration.TMP_STATE_TREE, 'issue-6912-requirements.txt') with open(req_filename, 'wb') as f: f.write('pep8') try: ret = self.run_state( 'pip.installed', name='', runas=username, bin_env=venv_dir, requirements='salt://issue-6912-requirements.txt') self.assertSaltTrueReturn(ret) uinfo = pwd.getpwnam(username) for globmatch in (os.path.join(venv_dir, '**', 'pep8*'), os.path.join(venv_dir, '*', '**', 'pep8*'), os.path.join(venv_dir, '*', '*', '**', 'pep8*')): for path in glob.glob(globmatch): self.assertEqual(uinfo.pw_uid, os.stat(path).st_uid) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) 
os.unlink(req_filename) # <---- Using runas -------------------------------------------------- # ----- Using user --------------------------------------------------> venv_create = self.run_function('virtualenv.create', [venv_dir], runas=username) if venv_create['retcode'] > 0: self.skipTest( 'Failed to create testcase virtual environment: {0}'.format( ret)) # Using the package name try: ret = self.run_state('pip.installed', name='pep8', user=username, bin_env=venv_dir) self.assertSaltTrueReturn(ret) uinfo = pwd.getpwnam(username) for globmatch in (os.path.join(venv_dir, '**', 'pep8*'), os.path.join(venv_dir, '*', '**', 'pep8*'), os.path.join(venv_dir, '*', '*', '**', 'pep8*')): for path in glob.glob(globmatch): self.assertEqual(uinfo.pw_uid, os.stat(path).st_uid) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) # Using a requirements file venv_create = self.run_function('virtualenv.create', [venv_dir], runas=username) if venv_create['retcode'] > 0: self.skipTest( 'Failed to create testcase virtual environment: {0}'.format( ret)) req_filename = os.path.join(integration.TMP_STATE_TREE, 'issue-6912-requirements.txt') with open(req_filename, 'wb') as f: f.write('pep8') try: ret = self.run_state( 'pip.installed', name='', user=username, bin_env=venv_dir, requirements='salt://issue-6912-requirements.txt') self.assertSaltTrueReturn(ret) uinfo = pwd.getpwnam(username) for globmatch in (os.path.join(venv_dir, '**', 'pep8*'), os.path.join(venv_dir, '*', '**', 'pep8*'), os.path.join(venv_dir, '*', '*', '**', 'pep8*')): for path in glob.glob(globmatch): self.assertEqual(uinfo.pw_uid, os.stat(path).st_uid) finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir) os.unlink(req_filename) # <---- Using user --------------------------------------------------- def test_issue_6833_pip_upgrade_pip(self): # Create the testing virtualenv venv_dir = os.path.join(integration.TMP, '6833-pip-upgrade-pip') ret = self.run_function('virtualenv.create', [venv_dir]) try: try: 
self.assertEqual(ret['retcode'], 0) self.assertIn('New python executable', ret['stdout']) except AssertionError: import pprint pprint.pprint(ret) raise # Let's install a fixed version pip over whatever pip was # previously installed ret = self.run_function('pip.install', ['pip==1.3.1'], upgrade=True, ignore_installed=True, bin_env=venv_dir) try: self.assertEqual(ret['retcode'], 0) self.assertIn('Successfully installed pip', ret['stdout']) except AssertionError: import pprint pprint.pprint(ret) raise # Le't make sure we have pip 1.3.1 installed self.assertEqual( self.run_function('pip.list', ['pip'], bin_env=venv_dir), {'pip': '1.3.1'}) # Now the actual pip upgrade pip test ret = self.run_state('pip.installed', name='pip==1.4.1', upgrade=True, bin_env=venv_dir) try: self.assertSaltTrueReturn(ret) self.assertInSaltReturn('Installed', ret, ['changes', 'pip==1.4.1']) except AssertionError: import pprint pprint.pprint(ret) raise finally: if os.path.isdir(venv_dir): shutil.rmtree(venv_dir)
def create_runtime(
    self, env: MutableMapping[str, str], runtimeContext: RuntimeContext
) -> Tuple[List[str], Optional[str]]:
    """Build the container runtime command line for this job.

    Returns a tuple of (runtime argv prefix, cidfile path or None).  The
    argv covers volumes, workdir, network, user mapping, environment and
    (optionally) a container-ID file; the image name and job command are
    appended by the caller.
    """
    any_path_okay = self.builder.get_requirement("DockerRequirement")[1] or False
    user_space_docker_cmd = runtimeContext.user_space_docker_cmd
    if user_space_docker_cmd:
        if "udocker" in user_space_docker_cmd and not runtimeContext.debug:
            runtime = [user_space_docker_cmd, "--quiet", "run"]
            # udocker 1.1.1 will output diagnostic messages to stdout
            # without this
        else:
            runtime = [user_space_docker_cmd, "run"]
    else:
        runtime = ["docker", "run", "-i"]
    # Output and temp dirs are always mounted writable.
    self.append_volume(
        runtime, os.path.realpath(self.outdir), self.builder.outdir, writable=True
    )
    tmpdir = "/tmp"  # nosec
    self.append_volume(
        runtime, os.path.realpath(self.tmpdir), tmpdir, writable=True
    )
    self.add_volumes(
        self.pathmapper,
        runtime,
        any_path_okay=True,
        secret_store=runtimeContext.secret_store,
        tmpdir_prefix=runtimeContext.tmpdir_prefix,
    )
    if self.generatemapper is not None:
        self.add_volumes(
            self.generatemapper,
            runtime,
            any_path_okay=any_path_okay,
            secret_store=runtimeContext.secret_store,
            tmpdir_prefix=runtimeContext.tmpdir_prefix,
        )
    if user_space_docker_cmd:
        # User-space runtimes don't understand the :ro/:rw volume suffixes.
        runtime = [x.replace(":ro", "") for x in runtime]
        runtime = [x.replace(":rw", "") for x in runtime]
    runtime.append(
        "--workdir=%s" % (docker_windows_path_adjust(self.builder.outdir))
    )
    if not user_space_docker_cmd:
        if not runtimeContext.no_read_only:
            runtime.append("--read-only=true")
        if self.networkaccess:
            if runtimeContext.custom_net:
                runtime.append("--net={0}".format(runtimeContext.custom_net))
        else:
            runtime.append("--net=none")
        if self.stdout is not None:
            runtime.append("--log-driver=none")
        euid, egid = docker_vm_id()
        if not onWindows():
            # MS Windows does not have getuid() or geteuid() functions
            euid, egid = euid or os.geteuid(), egid or os.getgid()
        if runtimeContext.no_match_user is False and (
            euid is not None and egid is not None
        ):
            runtime.append("--user=%d:%d" % (euid, egid))
    if runtimeContext.rm_container:
        runtime.append("--rm")
    runtime.append("--env=TMPDIR=/tmp")
    # spec currently says "HOME must be set to the designated output
    # directory." but spec might change to designated temp directory.
    # runtime.append("--env=HOME=/tmp")
    runtime.append("--env=HOME=%s" % self.builder.outdir)
    # add parameters to docker to write a container ID file
    if runtimeContext.user_space_docker_cmd is None:
        if runtimeContext.cidfile_dir:
            cidfile_dir = runtimeContext.cidfile_dir
            if not os.path.exists(str(cidfile_dir)):
                _logger.error(
                    "--cidfile-dir %s error:\n%s",
                    cidfile_dir,
                    "directory doesn't exist, please create it first",
                )
                exit(2)
            if not os.path.isdir(cidfile_dir):
                _logger.error(
                    "--cidfile-dir %s error:\n%s",
                    cidfile_dir,
                    cidfile_dir + " is not a directory, " "please check it first",
                )
                exit(2)
        else:
            tmp_dir, tmp_prefix = os.path.split(runtimeContext.tmpdir_prefix)
            cidfile_dir = tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
        cidfile_name = datetime.datetime.now().strftime("%Y%m%d%H%M%S-%f") + ".cid"
        if runtimeContext.cidfile_prefix is not None:
            cidfile_name = str(runtimeContext.cidfile_prefix + "-" + cidfile_name)
        cidfile_path = os.path.join(cidfile_dir, cidfile_name)
        runtime.append("--cidfile=%s" % cidfile_path)
    else:
        cidfile_path = None
    for key, value in self.environment.items():
        runtime.append("--env=%s=%s" % (key, value))
    if runtimeContext.strict_memory_limit and not user_space_docker_cmd:
        runtime.append("--memory=%dm" % self.builder.resources["ram"])
    elif not user_space_docker_cmd:
        res_req, _ = self.builder.get_requirement("ResourceRequirement")
        # Fixed: original tested `"ramMax" is res_req` (identity against a
        # dict, always False), so a ResourceRequirement carrying only ramMax
        # never triggered this warning.  Membership test is intended.
        if res_req is not None and ("ramMin" in res_req or "ramMax" in res_req):
            _logger.warning(
                "[job %s] Skipping Docker software container '--memory' limit "
                "despite presence of ResourceRequirement with ramMin "
                "and/or ramMax setting. Consider running with "
                "--strict-memory-limit for increased portability "
                "assurance.",
                self.name,
            )
    return runtime, cidfile_path
java_home = common_config.get("DEFAULT", "JAVA_HOME") hadoop_home = common_config.get("DEFAULT", "HADOOP_HOME") rsync_module = common_config.get("DEFAULT", "RSYNC_MODULE") assert (rsync_module != "") local_dir = config.get("DEFAULT", "LOCAL_DIR") assert (local_dir != "") fileno = config.get("DEFAULT", "FILENO") destno = config.get("DEFAULT", "DESTNO") host_groupnos = config.get("DEFAULT", "HOST_GROUPNO").split("\t") user = config.get("DEFAULT", "USER") group = config.get("DEFAULT", "GROUP") command_prefix = "" if os.geteuid() == 0: command_prefix = "sudo -u %s JAVA_HOME=%s " % (user, java_home) local_tmp_dir = os.path.join(local_dir, fileno) print local_tmp_dir if os.path.exists(local_tmp_dir): shutil.rmtree(local_tmp_dir) os.mkdir(local_tmp_dir) os.chown(local_tmp_dir, getpwnam(user).pw_uid, getpwnam(user).pw_gid) file = "part-%05d" % int(fileno) if check_interval < 0: check_interval = config.getint("DEFAULT", "CHECK_INTERVAL") if check_times < 0:
class UserTest(integration.ModuleCase, integration.SaltReturnAssertsMixIn): ''' test for user absent ''' def test_user_absent(self): ret = self.run_state('user.absent', name='unpossible') self.assertSaltTrueReturn(ret) def test_user_if_present(self): ret = self.run_state('user.present', name='nobody') self.assertSaltTrueReturn(ret) def test_user_if_present_with_gid(self): if self.run_function('group.info', ['nobody']): ret = self.run_state('user.present', name='nobody', gid='nobody') elif self.run_function('group.info', ['nogroup']): ret = self.run_state('user.present', name='nobody', gid='nogroup') else: self.skipTest( 'Neither \'nobody\' nor \'nogroup\' are valid groups') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_not_present(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the minion. And then destroys that user. Assume that it will break any system you run it on. ''' ret = self.run_state('user.present', name='salt_test') self.assertSaltTrueReturn(ret) ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_present_when_home_dir_does_not_18843(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the minion. And then destroys that user. Assume that it will break any system you run it on. 
''' HOMEDIR = '/home/home_of_salt_test' ret = self.run_state('user.present', name='salt_test', home=HOMEDIR) self.assertSaltTrueReturn(ret) self.run_function('file.absent', name=HOMEDIR) ret = self.run_state('user.present', name='salt_test', home=HOMEDIR) self.assertSaltTrueReturn(ret) ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_present_nondefault(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the on the minion. ''' ret = self.run_state('user.present', name='salt_test', home='/var/lib/salt_test') self.assertSaltTrueReturn(ret) self.assertTrue(os.path.isdir('/var/lib/salt_test')) ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') @requires_system_grains def test_user_present_gid_from_name_default(self, grains=None): ''' This is a DESTRUCTIVE TEST. It creates a new user on the on the minion. This is an integration test. Not all systems will automatically create a group of the same name as the user, but I don't have access to any. If you run the test and it fails, please fix the code it's testing to work on your operating system. 
''' ret = self.run_state('user.present', name='salt_test', gid_from_name=True, home='/var/lib/salt_test') self.assertSaltTrueReturn(ret) ret = self.run_function('user.info', ['salt_test']) self.assertReturnNonEmptySaltType(ret) group_name = grp.getgrgid(ret['gid']).gr_name self.assertTrue(os.path.isdir('/var/lib/salt_test')) if grains['os_family'] in ('Suse', ): self.assertEqual(group_name, 'users') else: self.assertEqual(group_name, 'salt_test') ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_present_gid_from_name(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the on the minion. This is a unit test, NOT an integration test. We create a group of the same name as the user beforehand, so it should all run smoothly. ''' ret = self.run_state('group.present', name='salt_test') self.assertSaltTrueReturn(ret) ret = self.run_state('user.present', name='salt_test', gid_from_name=True, home='/var/lib/salt_test') self.assertSaltTrueReturn(ret) ret = self.run_function('user.info', ['salt_test']) self.assertReturnNonEmptySaltType(ret) group_name = grp.getgrgid(ret['gid']).gr_name self.assertTrue(os.path.isdir('/var/lib/salt_test')) self.assertEqual(group_name, 'salt_test') ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) ret = self.run_state('group.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_present_gecos(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the on the minion. It ensures that numeric GECOS data will be properly coerced to strings, otherwise the state will fail because the GECOS fields are written as strings (and show up in the user.info output as such). Thus the comparison will fail, since '12345' != 12345. 
''' ret = self.run_state('user.present', name='salt_test', fullname=12345, roomnumber=123, workphone=1234567890, homephone=1234567890) self.assertSaltTrueReturn(ret) ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret) @destructiveTest @skipIf(os.geteuid() != 0, 'you must be root to run this test') def test_user_present_gecos_none_fields(self): ''' This is a DESTRUCTIVE TEST it creates a new user on the on the minion. It ensures that if no GECOS data is supplied, the fields will be coerced into empty strings as opposed to the string "None". ''' ret = self.run_state('user.present', name='salt_test', fullname=None, roomnumber=None, workphone=None, homephone=None) self.assertSaltTrueReturn(ret) ret = self.run_function('user.info', ['salt_test']) self.assertReturnNonEmptySaltType(ret) self.assertEqual('', ret['fullname']) self.assertEqual('', ret['roomnumber']) self.assertEqual('', ret['workphone']) self.assertEqual('', ret['homephone']) ret = self.run_state('user.absent', name='salt_test') self.assertSaltTrueReturn(ret)
def isRoot():
    """Abort the installer with an error message unless running as root."""
    if os.geteuid() != 0:
        # NOTE(review): the literal "{RED}" is printed verbatim -- this looks
        # like a missing f-string/format() for a colour constant; confirm.
        sys.exit("{RED}[!] Installer must be run as root")
def __get_username():
    """Return the effective username of the current process.

    On POSIX the effective UID is resolved through the password database;
    on Windows (no UID concept) getpass is used instead.
    """
    if not WINDOWS:
        import pwd  # POSIX-only module; imported lazily on purpose
        return pwd.getpwuid(os.geteuid()).pw_name
    return getpass.getuser()
def main(cli_args=sys.argv[1:]): """Command line argument parsing and main script execution.""" sys.excepthook = functools.partial(_handle_exception, args=None) # note: arg parser internally handles --help (and exits afterwards) plugins = plugins_disco.PluginsRegistry.find_all() args = prepare_and_parse_args(plugins, cli_args) config = configuration.NamespaceConfig(args) zope.component.provideUtility(config) # Setup logging ASAP, otherwise "No handlers could be found for # logger ..." TODO: this should be done before plugins discovery for directory in config.config_dir, config.work_dir: le_util.make_or_verify_dir(directory, constants.CONFIG_DIRS_MODE, os.geteuid(), "--strict-permissions" in cli_args) # TODO: logs might contain sensitive data such as contents of the # private key! #525 le_util.make_or_verify_dir(args.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args) setup_logging(args, _cli_log_handler, logfile='letsencrypt.log') logger.debug("letsencrypt version: %s", letsencrypt.__version__) # do not log `args`, as it contains sensitive data (e.g. revoke --key)! logger.debug("Arguments: %r", cli_args) logger.debug("Discovered plugins: %r", plugins) sys.excepthook = functools.partial(_handle_exception, args=args) # Displayer if args.text_mode: displayer = display_util.FileDisplay(sys.stdout) else: displayer = display_util.NcursesDisplay() zope.component.provideUtility(displayer) # Reporter report = reporter.Reporter() zope.component.provideUtility(report) atexit.register(report.atexit_print_messages) # TODO: remove developer preview prompt for the launch if not config.agree_dev_preview: disclaimer = pkg_resources.resource_string("letsencrypt", "DISCLAIMER") if not zope.component.getUtility(interfaces.IDisplay).yesno( disclaimer, "Agree", "Cancel"): raise Error("Must agree to TOS") if not os.geteuid() == 0: logger.warning( "Root (sudo) is required to run most of letsencrypt functionality." 
) # check must be done after arg parsing as --help should work # w/o root; on the other hand, e.g. "letsencrypt run # --authenticator dns" or "letsencrypt plugins" does not # require root as well #return ( # "{0}Root is required to run letsencrypt. Please use sudo.{0}" # .format(os.linesep)) return args.func(args, config, plugins)
def main(argv=None): if os.geteuid() != 0: print("It looks like this command wasn't run with root privileges. Perhaps you didn't use `sudo -E`?") if argv is None: argv = sys.argv[1:] from .log import init_logging try: init_logging() except Exception as e: print(str(e)) print("Perhaps you didn't use `sudo -E`?") argparser = argparse.ArgumentParser() argparser.add_argument( '--config-path', default=CONFIG_FILE, help='Path to TLJH config.yaml file' ) subparsers = argparser.add_subparsers(dest='action') show_parser = subparsers.add_parser( 'show', help='Show current configuration' ) unset_parser = subparsers.add_parser( 'unset', help='Unset a configuration property' ) unset_parser.add_argument( 'key_path', help='Dot separated path to configuration key to unset' ) set_parser = subparsers.add_parser( 'set', help='Set a configuration property' ) set_parser.add_argument( 'key_path', help='Dot separated path to configuration key to set' ) set_parser.add_argument( 'value', help='Value to set the configuration key to' ) add_item_parser = subparsers.add_parser( 'add-item', help='Add a value to a list for a configuration property' ) add_item_parser.add_argument( 'key_path', help='Dot separated path to configuration key to add value to' ) add_item_parser.add_argument( 'value', help='Value to add to the configuration key' ) remove_item_parser = subparsers.add_parser( 'remove-item', help='Remove a value from a list for a configuration property' ) remove_item_parser.add_argument( 'key_path', help='Dot separated path to configuration key to remove value from' ) remove_item_parser.add_argument( 'value', help='Value to remove from key_path' ) reload_parser = subparsers.add_parser( 'reload', help='Reload a component to apply configuration change' ) reload_parser.add_argument( 'component', choices=('hub', 'proxy'), help='Which component to reload', default='hub', nargs='?' 
) args = argparser.parse_args(argv) if args.action == 'show': show_config(args.config_path) elif args.action == 'set': set_config_value(args.config_path, args.key_path, parse_value(args.value)) elif args.action == 'unset': unset_config_value(args.config_path, args.key_path) elif args.action == 'add-item': add_config_value(args.config_path, args.key_path, parse_value(args.value)) elif args.action == 'remove-item': remove_config_value(args.config_path, args.key_path, parse_value(args.value)) elif args.action == 'reload': reload_component(args.component) else: argparser.print_help()
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd: Passed to subprocess.Popen.
    :type cmd: string
    :param process_input: Send to opened process.
    :type process_input: string
    :param env_variables: Environment variables and their values that
                          will be set for the process.
    :type env_variables: dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code: boolean, int, or [int]
    :param delay_on_retry: True | False. Defaults to True.  If set to True,
                           wait a short amount of time before retrying.
    :type delay_on_retry: boolean
    :param attempts: How many times to retry cmd.
    :type attempts: int
    :param run_as_root: True | False. Defaults to False. If set to True,
                        the command is prefixed by the command specified
                        in the root_helper kwarg.
    :type run_as_root: boolean
    :param root_helper: command to prefix to commands called with
                        run_as_root=True
    :type root_helper: string
    :param shell: whether or not there should be a shell used to
                  execute this command. Defaults to false.
    :type shell: boolean
    :param loglevel: log level for execute commands.
                     (Should be logging.DEBUG or logging.INFO)
    :type loglevel: int.
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)

    # Normalize check_exit_code to a list of acceptable return codes.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    # Any leftover kwargs are typos/unsupported options: fail loudly.
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)

    # BUG FIX: the original used ``cmd = map(str, cmd)``.  On Python 3,
    # ``map`` returns a one-shot iterator that the ``' '.join(cmd)`` below
    # exhausts, leaving an empty command for subprocess.Popen.  Materialize
    # a list so the command can be consumed more than once.
    cmd = [str(c) for c in cmd]
    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'),
                    sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    # Retry only on transient errno; re-raise anything else.
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            # Lazy %-style args so the message is only formatted when logged.
            LOG.log(loglevel, 'Result was %s', _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                # Never leak passwords through exception payloads.
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def cli_init(ctx):
    """Abort unless running as root, then prepare the CLI context object."""
    running_as_root = os.geteuid() == ROOT_UID
    if not running_as_root:
        cli_abort(ctx, "Root privileges are required")
    ctx.ensure_object(dict)
def set_known_host(
    user=None,
    hostname=None,
    fingerprint=None,
    key=None,
    port=None,
    enc=None,
    config=None,
    hash_known_hosts=True,
    timeout=5,
    fingerprint_hash_type=None,
):
    """
    Download SSH public key from remote host "hostname", optionally validate
    its fingerprint against "fingerprint" variable and save the record in the
    known_hosts file.

    If such a record does already exists in there, do nothing.

    user
        The user who owns the ssh authorized keys file to modify

    hostname
        The name of the remote host (e.g. "github.com")

    fingerprint
        The fingerprint of the key which must be present in the known_hosts
        file (optional if key specified)

    key
        The public key which must be presented in the known_hosts file
        (optional if fingerprint specified)

    port
        optional parameter, denoting the port of the remote host, which will
        be used in case, if the public key will be requested from it. By
        default the port 22 is used.

    enc
        Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
        or ssh-dss

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts. If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable. Default is 5
        seconds.

        .. versionadded:: 2016.3.0

    fingerprint_hash_type
        The public key fingerprint hash type that the public key fingerprint
        was originally hashed with. This defaults to ``sha256`` if not
        specified.

        .. versionadded:: 2016.11.4
        .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_known_host <user> fingerprint='xx:xx:..:xx' enc='ssh-rsa' config='.ssh/known_hosts'
    """
    if not hostname:
        return {"status": "error", "error": "hostname argument required"}

    # Hashed known_hosts entries cannot encode a non-default port, so the
    # two options are mutually exclusive.
    if port is not None and port != DEFAULT_SSH_PORT and hash_known_hosts:
        return {
            "status": "error",
            "error": "argument port can not be used in "
            "conjunction with argument hash_known_hosts",
        }

    update_required = False
    check_required = False

    # What is already stored for this host (may be empty/None).
    stored_host_entries = get_known_host_entries(
        user,
        hostname,
        config=config,
        port=port,
        fingerprint_hash_type=fingerprint_hash_type,
    )
    stored_keys = [h["key"] for h in stored_host_entries] if stored_host_entries else []
    stored_fingerprints = (
        [h["fingerprint"] for h in stored_host_entries] if stored_host_entries else []
    )

    # Decide whether the file needs rewriting, or whether we only need to
    # verify stored entries against what the remote host currently serves.
    if not stored_host_entries:
        update_required = True
    elif fingerprint and fingerprint not in stored_fingerprints:
        update_required = True
    elif key and key not in stored_keys:
        update_required = True
    elif key is None and fingerprint is None:
        check_required = True

    if not update_required and not check_required:
        return {"status": "exists", "keys": stored_keys}

    # No explicit key supplied: fetch the host's keys over the network.
    if not key:
        remote_host_entries = recv_known_host_entries(
            hostname,
            enc=enc,
            port=port,
            hash_known_hosts=hash_known_hosts,
            timeout=timeout,
            fingerprint_hash_type=fingerprint_hash_type,
        )
        # pylint: disable=not-an-iterable
        known_keys = (
            [h["key"] for h in remote_host_entries] if remote_host_entries else []
        )
        known_fingerprints = (
            [h["fingerprint"] for h in remote_host_entries] if remote_host_entries else []
        )
        # pylint: enable=not-an-iterable
        if not remote_host_entries:
            return {"status": "error", "error": "Unable to receive remote host keys"}

        if fingerprint and fingerprint not in known_fingerprints:
            return {
                "status": "error",
                "error": (
                    "Remote host public keys found but none of their "
                    "fingerprints match the one you have provided"
                ),
            }

        if check_required:
            # Only verification was requested: success if any remote key is
            # already stored.  NOTE(review): this loop rebinds the ``key``
            # parameter, which the "set up new value" branch below tests.
            for key in known_keys:
                if key in stored_keys:
                    return {"status": "exists", "keys": stored_keys}

    full = _get_known_hosts_file(config=config, user=user)

    if isinstance(full, dict):
        # _get_known_hosts_file signals errors by returning a dict.
        return full

    if os.path.isfile(full):
        # Remember the file's mode so it can be restored after rewriting.
        origmode = os.stat(full).st_mode

        # remove existing known_host entry with matching hostname and encryption key type
        # use ssh-keygen -F to find the specific line(s) for this host + enc combo
        ssh_hostname = _hostname_and_port_to_ssh_hostname(hostname, port)
        cmd = ["ssh-keygen", "-F", ssh_hostname, "-f", full]
        lines = __salt__["cmd.run"](
            cmd, ignore_retcode=True, python_shell=False
        ).splitlines()
        remove_lines = list(_get_matched_host_line_numbers(lines, enc))

        if remove_lines:
            try:
                with salt.utils.files.fopen(full, "r+") as ofile:
                    known_hosts_lines = salt.utils.data.decode(list(ofile))
                    # Delete from last line to first to avoid invalidating earlier indexes
                    for line_no in sorted(remove_lines, reverse=True):
                        del known_hosts_lines[line_no - 1]
                    # Write out changed known_hosts file
                    ofile.seek(0)
                    ofile.truncate()
                    ofile.writelines(
                        salt.utils.data.decode(known_hosts_lines, to_str=True)
                    )
            except OSError as exception:
                raise CommandExecutionError(
                    "Couldn't remove old entry(ies) from known hosts file: '{}'".format(
                        exception
                    )
                )
    else:
        origmode = None

    # set up new value
    if key:
        remote_host_entries = [{"hostname": hostname, "enc": enc, "key": key}]

    # Format each entry either plain/hashed or with an explicit [host]:port
    # marker when a non-default port is in play.
    lines = []
    for entry in remote_host_entries:
        if (
            hash_known_hosts
            or port in [DEFAULT_SSH_PORT, None]
            or ":" in entry["hostname"]
        ):
            line = "{hostname} {enc} {key}\n".format(**entry)
        else:
            entry["port"] = port
            line = "[{hostname}]:{port} {enc} {key}\n".format(**entry)
        lines.append(line)

    # ensure ~/.ssh exists
    ssh_dir = os.path.dirname(full)
    if user:
        uinfo = __salt__["user.info"](user)

    try:
        log.debug('Ensuring ssh config dir "%s" exists', ssh_dir)
        os.makedirs(ssh_dir)
    except OSError as exc:
        # NOTE(review): matching on exc.args[1] compares the strerror text,
        # which is locale-dependent — TODO confirm this is intended.
        if exc.args[1] == "Permission denied":
            log.error("Unable to create directory %s: " "%s", ssh_dir, exc.args[1])
        elif exc.args[1] == "File exists":
            log.debug("%s already exists, no need to create " "it", ssh_dir)
    else:
        # set proper ownership/permissions
        if user:
            os.chown(ssh_dir, uinfo["uid"], uinfo["gid"])
            os.chmod(ssh_dir, 0o700)

    # write line to known_hosts file
    try:
        with salt.utils.files.fopen(full, "ab") as ofile:
            ofile.writelines(salt.utils.data.encode(lines))
    except OSError as exception:
        raise CommandExecutionError(
            "Couldn't append to known hosts file: '{}'".format(exception)
        )

    if not salt.utils.platform.is_windows():
        # When running as root on behalf of a user, hand the file back to
        # them; restore the original mode when the file pre-existed.
        if os.geteuid() == 0 and user:
            os.chown(full, uinfo["uid"], uinfo["gid"])
        if origmode:
            os.chmod(full, origmode)
        else:
            os.chmod(full, 0o600)

    if key and hash_known_hosts:
        # Result of the hashing pass is not inspected here.
        cmd_result = __salt__["ssh.hash_known_hosts"](user=user, config=full)

    rval = {"status": "updated", "old": stored_host_entries, "new": remote_host_entries}
    return rval
#!/usr/bin/env python2.7 import os import time if os.geteuid() != 0: exit("You isn't root, is it.") vhost = raw_input('Enter name of new vhost to setup: ') vhost_path = "/Users/jim/www/" vhost_dir = vhost_path + vhost + "/public_html/" print "Ok attempting to setup [%s] as a new vhost in dir [%s]" % (vhost, vhost_dir) #os.makedirs(vhost_dir) os.mkdir(vhost_path + vhost + "/logs/") os.system("chown -R jim:staff " + vhost_path + vhost) print "New vhost directory should be listed below:" os.system('ls -l ' + vhost_path) print "Writing apache conf file..." fd = open("/etc/apache2/vhosts/default.local", "r") fn = open("/etc/apache2/vhosts/" + vhost + ".conf", "w") for line in fd: fn.write(line.replace('hostname', vhost)) fd.close()
def main():
    """Run the autopts client: connect to PTS server(s), register the
    Zephyr IUT projects, and execute the selected test cases.

    Exits immediately when run as root, and hard-exits the process at the
    end to tear down the XML-RPC server thread.
    """
    if os.geteuid() == 0:  # root privileges are not needed
        sys.exit("Please do not run this program as root.")

    args = parse_args()

    # Optional results database table, named after the target board.
    if args.store:
        tc_db_table_name = "zephyr_" + str(args.board)
    else:
        tc_db_table_name = None

    callback_thread = autoptsclient.init_core()

    # One PTS proxy per (server ip, local address) pair.
    ptses = []
    for ip, local in zip(args.ip_addr, args.local_addr):
        ptses.append(
            autoptsclient.init_pts(ip, args.workspace, args.bd_addr,
                                   args.enable_max_logs, callback_thread,
                                   tc_db_table_name, local))

    btp.init(get_iut)
    autoprojects.iutctl.init(args.kernel_image, args.tty_file, args.board)

    stack.init_stack()
    stack_inst = stack.get_stack()
    stack_inst.synch_init(callback_thread.set_pending_response,
                          callback_thread.clear_pending_responses)

    # Setup project PIXITS
    autoprojects.gap.set_pixits(ptses[0])
    autoprojects.gatt.set_pixits(ptses[0])
    autoprojects.sm.set_pixits(ptses[0])
    autoprojects.l2cap.set_pixits(ptses[0])
    # Mesh tests need a second PTS instance.
    if len(ptses) >= 2:
        autoprojects.mesh.set_pixits(ptses)

    test_cases = autoprojects.gap.test_cases(ptses[0])
    test_cases += autoprojects.gatt.test_cases(ptses[0])
    test_cases += autoprojects.sm.test_cases(ptses[0])
    test_cases += autoprojects.l2cap.test_cases(ptses[0])
    if len(ptses) >= 2:
        mesh_test_cases, additional_mesh_test_cases \
            = autoprojects.mesh.test_cases(ptses)
        test_cases += mesh_test_cases
        additional_test_cases = additional_mesh_test_cases
    else:
        additional_test_cases = []

    # Optionally narrow the run to an explicit include/exclude selection.
    if args.test_cases or args.excluded:
        test_cases = autoptsclient.get_test_cases_subset(
            test_cases, args.test_cases, args.excluded)

    autoptsclient.run_test_cases(ptses, test_cases,
                                 additional_test_cases, args.retry)

    autoprojects.iutctl.cleanup()

    print "\nBye!"
    sys.stdout.flush()

    for pts in ptses:
        pts.unregister_xmlrpc_ptscallback()

    # not the cleanest but the easiest way to exit the server thread
    os._exit(0)
def set_auth_key(
    user,
    key,
    enc="ssh-rsa",
    comment="",
    options=None,
    config=".ssh/authorized_keys",
    cache_keys=None,
    fingerprint_hash_type=None,
):
    """
    Add a key to the authorized_keys file. The "key" parameter must only be
    the string of text that is the encoded key. If the key begins with
    "ssh-rsa" or ends with user@host, remove those from the key before
    passing it to this function.

    Returns one of the status strings: "invalid", "fail",
    "Invalid public key", "replace", "no change", or "new".

    CLI Example:

    .. code-block:: bash

        salt '*' ssh.set_auth_key <user> '<key>' enc='dsa'
    """
    if cache_keys is None:
        cache_keys = []

    # A raw key is a single base64 token; whitespace means the caller passed
    # a full authorized_keys line instead.
    if len(key.split()) > 1:
        return "invalid"

    enc = _refine_enc(enc)
    uinfo = __salt__["user.info"](user)
    if not uinfo:
        return "fail"

    # A 'valid key' to us pretty much means 'decodable as base64', which is
    # the same filtering done when reading the authorized_keys file. Apply
    # the same check to ensure we don't insert anything that will not
    # subsequently be read)
    key_is_valid = _fingerprint(key, fingerprint_hash_type) is not None
    if not key_is_valid:
        return "Invalid public key"

    status = check_key(
        user,
        key,
        enc,
        comment,
        options,
        config=config,
        cache_keys=cache_keys,
        fingerprint_hash_type=fingerprint_hash_type,
    )
    if status == "update":
        # Same key present but with different enc/comment/options: rewrite it.
        _replace_auth_key(user, key, enc, comment, options or [], config)
        return "replace"
    elif status == "exists":
        return "no change"
    else:
        auth_line = _format_auth_line(key, enc, comment, options)
        fconfig = _get_config_file(user, config)
        # Fail if the key lives under the user's homedir, and the homedir
        # doesn't exist
        udir = uinfo.get("home", "")
        if fconfig.startswith(udir) and not os.path.isdir(udir):
            return "fail"

        if not os.path.isdir(os.path.dirname(fconfig)):
            dpath = os.path.dirname(fconfig)
            os.makedirs(dpath)
            if not salt.utils.platform.is_windows():
                # Running as root on behalf of a user: hand ownership back.
                if os.geteuid() == 0:
                    os.chown(dpath, uinfo["uid"], uinfo["gid"])
                # 448 == 0o700: only the owner may use the .ssh dir.
                os.chmod(dpath, 448)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.path.which("restorecon")
            if rcon:
                cmd = [rcon, dpath]
                subprocess.call(cmd)

        if not os.path.isfile(fconfig):
            new_file = True
        else:
            new_file = False

        try:
            with salt.utils.files.fopen(fconfig, "ab+") as _fh:
                if new_file is False:
                    # Let's make sure we have a new line at the end of the file
                    _fh.seek(0, 2)
                    if _fh.tell() > 0:
                        # File isn't empty, check if last byte is a newline
                        # If not, add one
                        _fh.seek(-1, 2)
                        if _fh.read(1) != b"\n":
                            _fh.write(b"\n")
                _fh.write(salt.utils.stringutils.to_bytes(auth_line))
        except OSError as exc:
            msg = "Could not write to key file: {0}"
            raise CommandExecutionError(msg.format(exc))

        if new_file:
            if not salt.utils.platform.is_windows():
                if os.geteuid() == 0:
                    os.chown(fconfig, uinfo["uid"], uinfo["gid"])
                # 384 == 0o600: authorized_keys must be private to the owner.
                os.chmod(fconfig, 384)
            # If SELINUX is available run a restorecon on the file
            rcon = salt.utils.path.which("restorecon")
            if rcon:
                cmd = [rcon, fconfig]
                subprocess.call(cmd)
        return "new"
def check_root():
    """Log a warning when streamlink is running with root privileges.

    Root is not required and carries extra risk, so only an informational
    message is emitted; platforms without POSIX effective-uid support are
    skipped silently.
    """
    # BUG FIX: the original guarded with hasattr(os, "getuid") but then
    # called os.geteuid(); check for the attribute that is actually used.
    if hasattr(os, "geteuid"):
        if os.geteuid() == 0:
            console.logger.info("streamlink is running as root! Be careful!")
group.add_argument('-a', '--all', dest='nukeitall', action='store_true', default=False, help='Remove all documents from all indices.') parser.add_argument( '-r', '--reload', dest='reload', action='store_true', default=False, help='Reload source files from SOF-ELK(R) filesystem. Requires "-f".') args = parser.parse_args() if args.reload and os.geteuid() != 0: print "Reload functionality requires administrative privileges. Run with 'sudo'." exit(1) # create Elasticsearch handle es = Elasticsearch([{'host': 'localhost', 'port': 9200}]) # get list of top-level indices if requested if args.index == 'list': populated_indices = get_es_indices(es) if len(populated_indices) == 0: print 'There are no active data indices in Elasticsearch' else: print 'The following indices are currently active in Elasticsearch:' for index in populated_indices: print '- %s' % (index)
BASE_INFO.update(getattr(fun, n)) os.chdir(SITES_HOME) except: print('trying to import base_info from %s/config failed' % SITES_HOME) sys.exit() DATA_HOME = BASE_INFO['erp_server_data_path'] #docker run -it -v /home/robert/odoo_projects_data:/mnt/sites -v /home/robert/erp_workbench/dumper/:/mnt/sites/dumper --rm=true --link db:db dbdumper -s dd = { 'data_home': DATA_HOME, 'sites_home': SITES_HOME, 'cmd': dumper_cmd, 'dbname': dbname, 'runsudo': '' } if not os.geteuid() == 0: dd['runsudo'] = 'sudo' if dumper_cmd in ['-h', '-s']: cmd_line = '%(runsudo)s docker run -v %(data_home)s:/mnt/sites -v %(sites_home)s/dumper/:/mnt/sites/dumper --rm=true --link db:db dbdumper %(cmd)s' % dd else: cmd_line = '%(runsudo)s docker run -v %(data_home)s:/mnt/sites -v %(sites_home)s/dumper/:/mnt/sites/dumper --rm=true --link db:db dbdumper %(cmd)s %(dbname)s' % dd (DATA_HOME, SITES_HOME, dumper_cmd, dbname) if verbose: print('--------- rundocker start ----------') print(cmd_line) cmd_line = cmd_line + ' -v' p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
def check_user(self):
    """Refuse to run as root: show a warning dialog and quit the GTK loop."""
    if os.geteuid() != 0:
        return
    self.msg_box(
        "This is the root user. Please use the ordinary user to run this program"
    )
    gtk.main_quit()