def _checkRunningConfig(self, job = None):
    """Return True when the on-disk gitolite config matches a freshly
    generated one AND the config key set agrees with the HD key set.

    :param job: passed through to the config/key helpers (may be None)
    :returns: bool
    """
    inFile = None
    with dissomniag.rootContext():
        try:
            with open(dissomniag.config.git.pathToConfigFile, 'r') as f:
                inFile = f.read()
        except Exception:
            # Missing/unreadable config file: fall through, handled below.
            pass
        config = self._getNewConfig(job)
        buf = StringIO.StringIO()
        config.write(buf)
        actualConfig = buf.getvalue()
        # The original tested "configKeys <= hdKeys" twice; a "differ"
        # check needs the subset relation in both directions (equality).
        configKeys = self._getConfigKeySet(config, job)
        hdKeys = self._getHdKeySet(job)
        if not (configKeys <= hdKeys and hdKeys <= configKeys):
            log.info("HD Keys and Config Keys differ!")
            return False
        if inFile is None:
            # No existing config to compare against -> treat as differing
            # (previously this crashed in hashlib.update(None)).
            return False
        # (A stray no-op grp.getgrnam(...).gr_gid statement was removed.)
        inHash = hashlib.sha256()
        inHash.update(inFile)
        actHash = hashlib.sha256()
        actHash.update(actualConfig)
        return inHash.hexdigest() == actHash.hexdigest()
def test_removed_mc(ldap_conn, sanity_rfc2307):
    """
    Regression test for ticket:
    https://fedorahosted.org/sssd/ticket/2726
    """
    # Prime both the passwd and group memory caches while sssd is running.
    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # remove cache without invalidation
    for path in os.listdir(config.MCACHE_PATH):
        os.unlink(config.MCACHE_PATH + "/" + path)

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)
    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
def create(self):
    """
    Ensure a system account named after ``self.name`` exists, with its
    own group, home directory and optional supplementary groups.

    Returns:
        True: if the user creation went right
    """
    # XXX: This method shall be no-op in case if all is correctly setup
    # This method shall check if all is correctly done
    # This method shall not reset groups, just add them
    try:
        grp.getgrnam(self.name)
    except KeyError:
        callAndRead(['groupadd', self.name])

    options = ['-d', self.path, '-g', self.name, '-s', '/bin/false']
    if self.additional_group_list is not None:
        options.extend(['-G', ','.join(self.additional_group_list)])
    options.append(self.name)

    try:
        pwd.getpwnam(self.name)
    except KeyError:
        # Account is missing: create it as a system user.
        options.append('-r')
        callAndRead(['useradd'] + options)
    else:
        # Account already present: realign its settings instead.
        callAndRead(['usermod'] + options)
    return True
def install(self, env):
    """Install JBoss: unpack the package, create the jboss user/group,
    and register the init script with chkconfig."""
    import params
    env.set_params(params)
    self.install_packages(env)
    kc.copy_cache_or_repo(self.package, cache_dir=self.installer_cache_path, arch='noarch')
    self.clean_up_failed_install()
    Execute('unzip -o -q %s -d %s' % (self.package, params.installation_dir))
    # The archive unpacks into a jb*/ subdirectory; hoist its contents up.
    Execute('mv %s/jb*/* %s' % (params.installation_dir, params.installation_dir))
    Execute('rm -rf %s/jb*.Final' % params.installation_dir)
    # Create the service group/user only when missing.
    try:
        grp.getgrnam('jboss')
    except KeyError:
        Execute('groupadd jboss')
    try:
        pwd.getpwnam('jboss')
    except KeyError:
        Execute('useradd -s /bin/bash -g jboss jboss')
    Execute('chown -Rf jboss:jboss %s' % params.installation_dir)
    File('/etc/init.d/jboss',
         content=Template("jboss.j2"),
         mode=0755
         )
    Execute('chkconfig --add jboss')
    Execute('chkconfig jboss on')
    self.configure(env)
def _init_log_file(self):
    """(Re)open this app's log file, creating the log directory/file and
    handing ownership back to the invoking user when running under sudo."""
    if self._log_file is not None:
        self._log_file.close()

    # Root (not via sudo) logs to the system dir; everyone else to the
    # per-user dir.
    if os.getuid() == 0 and not is_sudoed:
        logs_dir = SYSTEM_LOGS_DIR
    else:
        logs_dir = USER_LOGS_DIR

    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
        # Fix permissions in case we need to create the dir with sudo
        if is_sudoed:
            uid = pwd.getpwnam(usr).pw_uid
            gid = grp.getgrnam(usr).gr_gid
            os.chown(logs_dir, uid, gid)

    log_fn = "{}/{}.log".format(logs_dir, self._app_name)

    # Fix permissions in case we need to create the file with sudo
    if not os.path.isfile(log_fn) and is_sudoed:
        # touch
        with open(log_fn, 'a'):
            pass
        uid = pwd.getpwnam(usr).pw_uid
        gid = grp.getgrnam(usr).gr_gid
        os.chown(log_fn, uid, gid)

    # Reuse log_fn rather than re-deriving the same path (the original
    # rebuilt the format string here).
    self._log_file = open(log_fn, "a")
def drop_privileges(uid='nobody', gid='nogroup', supl_groups=None):
    """
    Drop privileges by changing the current process owner/group to
    *uid*/*gid* (both may be an integer or a string). If *supl_groups*
    (list) is given the process will be assigned those values as its
    effective supplemental groups. If *supl_groups* is None it will
    default to using 'tty' as the only supplemental group. Example::

        drop_privileges('gateone', 'gateone', ['tty'])

    This would change the current process owner to gateone/gateone with
    'tty' as its only supplemental group.

    .. note:: On most Unix systems users must belong to the 'tty' group
        to create new controlling TTYs which is necessary for
        'pty.fork()' to work.

    .. tip:: If you get errors like, "OSError: out of pty devices" it
        likely means that your OS uses something other than 'tty' as the
        group owner of the devpts filesystem. 'mount | grep pts' will
        tell you the owner.
    """
    import pwd, grp
    # Start from the given values, then resolve names to numeric ids.
    # (The original assigned "running_uid = uid" AFTER the getpwnam
    # lookup, clobbering the resolved uid.)
    running_uid = uid
    running_gid = gid
    if not isinstance(uid, int):
        running_uid = pwd.getpwnam(uid).pw_uid
    if not isinstance(gid, int):
        running_gid = grp.getgrnam(gid).gr_gid
    if supl_groups:
        for i, group in enumerate(supl_groups):
            # Just update in-place
            if not isinstance(group, int):
                supl_groups[i] = grp.getgrnam(group).gr_gid
        try:
            os.setgroups(supl_groups)
        except OSError as e:
            logging.error(_('Could not set supplemental groups: %s' % e))
            exit()
def groupExist(name_):
    """Return True when a unix group called *name_* is defined."""
    found = True
    try:
        grp.getgrnam(name_)
    except KeyError:
        found = False
    return found
def get_or_create_ids(username, groupname):
    """
    Get the UID and GID for a user and group, creating the user and group
    if necessary.

    Users are created with no login shell: if they need a shell, downstream
    init scripts should update it.
    """
    try:
        gid = grp.getgrnam(groupname).gr_gid
    except KeyError:
        logger.info("Creating group %s", groupname)
        # -f: exit successfully if the group already exists (race safety).
        subprocess.call(['/usr/sbin/groupadd', '-f', groupname])
        gid = grp.getgrnam(groupname).gr_gid
    try:
        uid = pwd.getpwnam(username).pw_uid
    except KeyError:
        logger.info("Creating user %s", username)
        command = '/usr/sbin/adduser'
        command_input = ['--gid', str(gid), '--shell', '/sbin/nologin', username]
        exit_code = subprocess.call([command, '--system'] + command_input)
        # If the above command fails it's highly likely that we are on a
        # CentOS 5 system whose adduser doesn't have the `--system` option;
        # it has `-r` instead.
        if exit_code != 0:
            subprocess.call([command, '-r'] + command_input)
        uid = pwd.getpwnam(username).pw_uid
    return uid, gid
def test_mc_zero_timeout(ldap_conn, zero_timeout_rfc2307): """ Test that the memory cache is not created at all with memcache_timeout=0 """ # No memory cache files must be created assert len(os.listdir(config.MCACHE_PATH)) == 0 ent.assert_passwd_by_name( 'user1', dict(name='user1', passwd='*', uid=1001, gid=2001, gecos='1001', shell='/bin/bash')) ent.assert_passwd_by_uid( 1001, dict(name='user1', passwd='*', uid=1001, gid=2001, gecos='1001', shell='/bin/bash')) ent.assert_group_by_name("group1", dict(name="group1", gid=2001)) ent.assert_group_by_gid(2001, dict(name="group1", gid=2001)) stop_sssd() # sssd is stopped; so the memory cache should not be used # in long living clients (py.test in this case) with pytest.raises(KeyError): pwd.getpwnam('user1') with pytest.raises(KeyError): pwd.getpwuid(1001) with pytest.raises(KeyError): grp.getgrnam('group1') with pytest.raises(KeyError): grp.getgrgid(2001)
def test_group_2307_delete_group(ldap_conn, ldb_examine, setup_rfc2307):
    """
    Test that deleting a group removes it from both caches
    """
    ldb_conn = ldb_examine
    old_sysdb_attrs, old_ts_attrs = prime_cache_group(
        ldb_conn, "group1", ("user1", "user11", "user21"))

    e = ldap_ent.group_bis(ldap_conn.ds_inst.base_dn, "group1", 2001)
    ldap_conn.delete_s(e[0])
    # wait for slapd to change its database
    time.sleep(1)

    with pytest.raises(KeyError):
        grp.getgrnam("group1")

    # Both the sysdb cache and the timestamp cache entries must be gone.
    sysdb_attrs, ts_attrs = get_group_attrs(ldb_conn, "group1",
                                            SSSD_DOMAIN, TS_ATTRLIST)
    assert sysdb_attrs.get("dataExpireTimestamp") is None
    assert sysdb_attrs.get("originalModifyTimestamp") is None
    assert ts_attrs.get("dataExpireTimestamp") is None
    assert ts_attrs.get("originalModifyTimestamp") is None
def __init__(self, name_or_gid = None):
    """Initialize from a group name, a gid, another Group, or nothing.

    All fields start as None and are filled from the system group
    database when a usable identifier is given; on a failed lookup the
    corresponding identifier field is left as None.
    """
    # If passed a string, assume group name
    # If passed a number, assume gid
    # If None, leave everything with a value of None

    # Initialize everything to None
    for i in self._fields:
        setattr(self, i, None)

    # Determine whether we were passed a name or a gid or a Group
    if isinstance(name_or_gid, Group):
        # Guessing it's a Group object - clone the settings via a
        # fresh lookup by name (preferred) or gid.
        if name_or_gid is not None:  # identity test; was "!= None"
            if name_or_gid.name is not None:
                gr_info = grp.getgrnam(name_or_gid.name)
            else:
                gr_info = grp.getgrgid(name_or_gid.gid)
            self._init_with_grp(gr_info)
    elif isinstance(name_or_gid, (int, long)):
        # Guessing it's a gid
        try:
            gr_info = grp.getgrgid(name_or_gid)
            self._init_with_grp(gr_info)
        except KeyError:
            self.gid = None
    elif isinstance(name_or_gid, basestring):
        # Guessing it's a group name
        try:
            gr_info = grp.getgrnam(name_or_gid)
            self._init_with_grp(gr_info)
        except KeyError:
            self.name = None
def add_group(group_name, system_group=False, gid=None):
    """Add a group to the system

    Will log but otherwise succeed if the group already exists.

    :param str group_name: group to create
    :param bool system_group: Create system group
    :param int gid: GID for the group being created

    :returns: The group database entry struct, as returned by `grp.getgrnam`
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
        if gid:
            group_info = grp.getgrgid(gid)
            log('group with gid {0} already exists!'.format(gid))
    except KeyError:
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup']
        if gid:
            cmd.extend(['--gid', str(gid)])
        if system_group:
            cmd.append('--system')
        else:
            cmd.extend([
                '--group',
            ])
        cmd.append(group_name)
        subprocess.check_call(cmd)
        group_info = grp.getgrnam(group_name)
    return group_info
def getAndVerifyExecutable():
    """
    Return the name of the docker executable. Exits and displays a user
    friendly error message if docker is not setup correctly.
    """
    executable = getExecutable()
    if not executable:
        sys.exit("""Error: Docker is not installed.
For installation instructions see
<https://www.docker.io/gettingstarted/#h_installation>""")
    # NOTE(review): pid-file-only liveness check; assumes the default pid
    # location -- confirm against the target docker packaging.
    if not os.path.exists("/var/run/docker.pid"):
        sys.exit("""Error: Docker is not running. You can launch it as root with:
# docker -d
""")
    username = getpass.getuser()
    if os.getuid() != 0:
        # Non-root users must belong to the docker (or dockerroot) group.
        try:
            groupmembers = grp.getgrnam("docker").gr_mem
        except KeyError:
            groupmembers = grp.getgrnam("dockerroot").gr_mem
        if username not in groupmembers:
            sys.exit("""Error: You are not a member of the docker group.
To learn how to become a member of the docker group please watch this video:
<http://www.youtube.com/watch?v=ahgRx5U4V7E>""")
    return executable
def install(self, env):
    """Install the service: unpack the package, create the service
    user/group, verify JAVA_HOME exists, then configure."""
    import params
    env.set_params(params)
    self.install_packages(env)
    kc.copy_cache_or_repo(self.package, cache_dir=self.installer_cache_path, arch='noarch')
    self.clean_up_failed_install()
    Execute('unzip -o -q %s ' % (self.package))
    Execute('mv %s %s' % (self.package.replace('.zip', ''), params.installation_dir))
    # Execute('rm -rf %s/jb*.Final' % params.installation_dir)
    # Create the service group/user only when missing.
    try:
        grp.getgrnam(params.service_user)
    except KeyError:
        Execute('groupadd ' + params.service_user)
    try:
        pwd.getpwnam(params.service_user)
    except KeyError:
        Execute('useradd -s /bin/bash -g %s %s' % (params.service_user, params.service_user))
    Execute('chown -Rf %s:%s %s' % (params.service_user, params.service_user, params.installation_dir))
    import glob
    # Fail fast when the expected JDK location does not exist.
    if not len(glob.glob(params.JAVA_HOME)):
        raise ValueError("Could not find JAVA_HOME in location : " + params.JAVA_HOME)
    self.configure(env)
def sync_groups(args):
    """Mirror IAM groups under the /keymaker/ path prefix into local unix
    groups, adding and removing members so both stay in sync."""
    from pwd import getpwnam
    iam = boto3.resource("iam")
    for group in iam.groups.filter(PathPrefix="/keymaker/"):
        # Only groups following the keymaker- naming convention are managed.
        if not group.name.startswith("keymaker-"):
            continue
        logger.info("Syncing IAM group %s", group.name)
        unix_group_name = group.name[len("keymaker-"):]
        try:
            unix_group = grp.getgrnam(unix_group_name)
        except KeyError:
            logger.info("Provisioning group %s from IAM", unix_group_name)
            # Derive a deterministic gid from the IAM group id.
            subprocess.check_call(["groupadd", "--gid", str(aws_to_unix_id(group.group_id)), unix_group_name])
            unix_group = grp.getgrnam(unix_group_name)
        user_names_in_iam_group = [user.name for user in group.users.all()]
        for user in user_names_in_iam_group:
            try:
                uid = pwd.getpwnam(user).pw_uid
                # uids below 2000 are treated as not keymaker-managed.
                if uid < 2000:
                    raise ValueError(uid)
            except Exception:
                logger.warn("User %s is not provisioned or not managed by keymaker, skipping", user)
                continue
            if user not in unix_group.gr_mem:
                logger.info("Adding user %s to group %s", user, unix_group_name)
                subprocess.check_call(["usermod", "--append", "--groups", unix_group_name, user])
        # Drop local members that are no longer in the IAM group.
        for unix_user_name in unix_group.gr_mem:
            if unix_user_name not in user_names_in_iam_group:
                subprocess.check_call(["gpasswd", "--delete", unix_user_name, unix_group_name])
def install(self, env):
    """Install logfeeder packages and create its service user/group."""
    # import properties defined in -config.xml file from params class
    import params
    import status_params

    # Install packages listed in metainfo.xml
    self.install_packages(env)

    # Create the group/user only when missing from the local databases.
    try:
        grp.getgrnam(params.logfeeder_group)
    except KeyError:
        Group(group_name=params.logfeeder_group)
    try:
        pwd.getpwnam(params.logfeeder_user)
    except KeyError:
        User(
            username=params.logfeeder_user,
            gid=params.logfeeder_group,
            groups=[params.logfeeder_group],
            ignore_failures=True,
        )

    self.install_logfeeder()
    Execute('echo "logfeeder install complete"')
def showgroup_ui(form):
    """Render the group-details page for the CGI 'group' parameter."""
    if "group" in form:
        group = form["group"].value
        # NOTE(review): bare except hides unexpected errors; getgrnam's
        # expected failure is KeyError -- confirm before narrowing.
        try:
            grp.getgrnam(group)
        except:
            print_ui(errorpage % ("No such group 1"))
            return
        grpvalue = grp.getgrnam(group)
        # Only groups in the "regular" gid range are shown.
        if ((grpvalue[2] < 1000) or (grpvalue[2] > 64000)):
            print_ui(errorpage % ("No such group 2"))
            return
        table = u"<tr><td>Группа</td><td>Пользователи</td><td>Комментарий к группе</td></tr>"
        table += u"<tr><td>%s</td>" % (group, )
        table += u"<td>"
        # Lay out the member list as links, 8 per table row.
        grptable = u"<table width=\"100%\"><tr>"
        k = 0
        for p in grpvalue[3]:
            grptable += "<td width=12.5%><a href=\"" + userbase + p + "\">" + unicode(p) + "</a></td>"
            if k % 8 == 7:
                grptable += "</tr><tr>"
            k = k + 1
        grptable += "</tr></table>"
        table += grptable
        # Optional free-text comment stored per group in the DB.
        comment = db_exec_sql('select comment from comments where groupname = ?', (group,))
        if comment == []:
            comment = ""
        else:
            comment = comment[0][0]
        table += u"</td><td>%s</td></tr>" % (comment, )
        print_ui(showgrouppage % (group, table,))
    else:
        print_ui(errorpage % ("No group specified",))
def output_JSONDataToFile(filePath, contents, warehouseUser):
    """Write *contents* as JSON to *filePath*, creating the parent
    directory if needed, and chown both to *warehouseUser*."""
    try:
        os.makedirs(os.path.dirname(filePath))
    except OSError as exception:
        # Only an already-existing directory is tolerated.
        if exception.errno != errno.EEXIST:
            raise
            # NOTE(review): the statements below are unreachable after the
            # bare raise above -- confirm the intended placement.
            worker.stopJob()
            print "Unable to create directory for dashboard data file"
            return False
    finally:
        os.chown(os.path.dirname(filePath),
                 pwd.getpwnam(warehouseUser).pw_uid,
                 grp.getgrnam(warehouseUser).gr_gid)
    try:
        #print "Open JSON file"
        JSONFile = open(filePath, 'w')
        #print "Saving JSON file"
        json.dump(contents, JSONFile)
    except IOError:
        print "Error Saving JSON file"
        return False
    finally:
        #print "Closing JSON file"
        JSONFile.close()
        os.chown(filePath,
                 pwd.getpwnam(warehouseUser).pw_uid,
                 grp.getgrnam(warehouseUser).gr_gid)
    return True
def exists(self):
    """Return True when a unix group named ``self.name`` is present
    on the system."""
    try:
        getgrnam(self.name)
    except KeyError:
        return False
    return True
def _group_create(self, args):
    """Create the local unix groups described by *args* when they do not
    already exist.

    :param: args: iterable of ``dict`` entries with keys ``group`` and
        optionally ``system`` (bool, create as a system group)
    """
    for group_create in args:
        try:
            grp.getgrnam(group_create.get('group'))
        except KeyError:
            group = ['groupadd']
            # Basic user or System user
            if group_create.get('system') is True:
                user_type = '--system %(group)s'
            else:
                user_type = '%(group)s'
            group.append(user_type)
            # %-interpolate the group name into the command template.
            command = [' '.join(group) % group_create]
            self.__execute_command(commands=command)
            LOG.info('Group Created [ %s ]', group_create['group'])
        else:
            LOG.info(
                'No Group Created it already Exists [ %s ]',
                group_create['group']
            )
def test_sanity_rfc2307_bis(ldap_conn, sanity_rfc2307_bis):
    # Exactly the fixture's users must be visible through NSS.
    passwd_pattern = ent.contains_only(
        dict(name="user1", passwd="*", uid=1001, gid=2001,
             gecos="1001", dir="/home/user1", shell="/bin/bash"),
        dict(name="user2", passwd="*", uid=1002, gid=2002,
             gecos="1002", dir="/home/user2", shell="/bin/bash"),
        dict(name="user3", passwd="*", uid=1003, gid=2003,
             gecos="1003", dir="/home/user3", shell="/bin/bash"),
    )
    ent.assert_passwd(passwd_pattern)

    # Likewise for groups, including nested/empty group handling.
    group_pattern = ent.contains_only(
        dict(name="group1", passwd="*", gid=2001, mem=ent.contains_only()),
        dict(name="group2", passwd="*", gid=2002, mem=ent.contains_only()),
        dict(name="group3", passwd="*", gid=2003, mem=ent.contains_only()),
        dict(name="empty_group1", passwd="*", gid=2010,
             mem=ent.contains_only()),
        dict(name="empty_group2", passwd="*", gid=2011,
             mem=ent.contains_only()),
        dict(name="two_user_group", passwd="*", gid=2012,
             mem=ent.contains_only("user1", "user2")),
        dict(name="group_empty_group", passwd="*", gid=2013,
             mem=ent.contains_only()),
        dict(name="group_two_empty_groups", passwd="*", gid=2014,
             mem=ent.contains_only()),
        dict(name="one_user_group1", passwd="*", gid=2015,
             mem=ent.contains_only("user1")),
        dict(name="one_user_group2", passwd="*", gid=2016,
             mem=ent.contains_only("user2")),
        dict(name="group_one_user_group", passwd="*", gid=2017,
             mem=ent.contains_only("user1")),
        dict(name="group_two_user_group", passwd="*", gid=2018,
             mem=ent.contains_only("user1", "user2")),
        dict(name="group_two_one_user_groups", passwd="*", gid=2019,
             mem=ent.contains_only("user1", "user2")),
    )
    ent.assert_group(group_pattern)

    # Negative lookups must raise KeyError rather than return entries.
    with pytest.raises(KeyError):
        pwd.getpwnam("non_existent_user")
    with pytest.raises(KeyError):
        pwd.getpwuid(1)
    with pytest.raises(KeyError):
        grp.getgrnam("non_existent_group")
    with pytest.raises(KeyError):
        grp.getgrgid(1)
def test_sanity_rfc2307(ldap_conn, sanity_rfc2307):
    # Exactly the fixture's users must be visible through NSS.
    passwd_pattern = ent.contains_only(
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', dir='/home/user1', shell='/bin/bash'),
        dict(name='user2', passwd='*', uid=1002, gid=2002,
             gecos='1002', dir='/home/user2', shell='/bin/bash'),
        dict(name='user3', passwd='*', uid=1003, gid=2003,
             gecos='1003', dir='/home/user3', shell='/bin/bash')
    )
    ent.assert_passwd(passwd_pattern)

    # Likewise for groups, including the empty and multi-member ones.
    group_pattern = ent.contains_only(
        dict(name='group1', passwd='*', gid=2001, mem=ent.contains_only()),
        dict(name='group2', passwd='*', gid=2002, mem=ent.contains_only()),
        dict(name='group3', passwd='*', gid=2003, mem=ent.contains_only()),
        dict(name='empty_group', passwd='*', gid=2010,
             mem=ent.contains_only()),
        dict(name='two_user_group', passwd='*', gid=2012,
             mem=ent.contains_only("user1", "user2"))
    )
    ent.assert_group(group_pattern)

    # Negative lookups must raise KeyError rather than return entries.
    with pytest.raises(KeyError):
        pwd.getpwnam("non_existent_user")
    with pytest.raises(KeyError):
        pwd.getpwuid(1)
    with pytest.raises(KeyError):
        grp.getgrnam("non_existent_group")
    with pytest.raises(KeyError):
        grp.getgrgid(1)
def check_owner_files(self, full_path, user, group):
    """Chown *full_path* to *user*:*group* and log the path.

    NOTE(review): the ``in`` tests below are substring checks against the
    desired user/group names, and they chown only when the current
    owner/group name already matches -- this looks inverted; confirm the
    intended condition before changing behavior.
    """
    import grp, pwd
    if pwd.getpwuid(os.stat(full_path).st_uid)[0] in user:
        os.chown(full_path, pwd.getpwnam(user).pw_uid, grp.getgrnam(group).gr_gid)
    if grp.getgrgid(os.stat(full_path).st_gid)[0] in group:
        os.chown(full_path, pwd.getpwnam(user).pw_uid, grp.getgrnam(group).gr_gid)
    self.write_log.write_log(full_path)
def GetLPID(default="lp", alternative="cups", useFiles=True,
            blacklistedGroups=None, checkGroupBlacklist=False):
    """Return a gid suitable for owning printing-related files, or None.

    When *useFiles* is true, the group owner of well-known CUPS files is
    tried first; otherwise (or when none qualifies) the *default* and
    then *alternative* group names are looked up.

    :param blacklistedGroups: group names never to return (defaults to
        adm/wheel/root); their gids are always excluded from the
        file-based probe.
    :param checkGroupBlacklist: when True, also apply the blacklist to
        the name-based lookup (default False keeps legacy behavior).
    """
    if blacklistedGroups is None:
        blacklistedGroups = ["adm", "wheel", "root"]
    blacklistedGroupIds = []
    for group in blacklistedGroups:
        try:
            blacklistedGroupIds.append(grp.getgrnam(group).gr_gid)
        except KeyError:
            # Blacklisted group not present on this system.
            pass

    if useFiles:
        # check files in order
        for cupsConfigFile in ["/var/log/cups/access_log",
                               "/etc/cups/ppd",
                               "/usr/local/etc/cups/ppd"]:
            if os.path.exists(cupsConfigFile):
                # stat once, not twice as before
                fileGid = os.stat(cupsConfigFile).st_gid
                if fileGid not in blacklistedGroupIds:
                    return fileGid

    # try lp first, then cups
    lpgrp = None
    try:
        lpgrp = grp.getgrnam(default)
    except KeyError:
        try:
            lpgrp = grp.getgrnam(alternative)
        except KeyError:
            pass
    if lpgrp is None:
        return None
    if checkGroupBlacklist and (lpgrp.gr_name in blacklistedGroups or
                                lpgrp.gr_gid in blacklistedGroupIds):
        return None
    return lpgrp.gr_gid
def test_getLPID():
    # Default lookup must resolve to a real, positive gid.
    assert int(Utils.GetLPID()) > 0
    assert Utils.GetLPID() is not None

    import grp
    # Pick whichever of lp/cups exists on this host.
    workingPrintGroupName = 'lp'
    try:
        grp.getgrnam(workingPrintGroupName)
    except:
        workingPrintGroupName = 'cups'
        pass

    # Unknown default and alternative, no file probing: nothing to return.
    assert Utils.GetLPID('brokendefault', 'brokenalternative', False) is None
    # Falls back from a broken default to the working alternative.
    assert int(
        Utils.GetLPID(
            'brokendefault',
            workingPrintGroupName,
            False)) > 0
    assert Utils.GetLPID(
        'brokendefault',
        workingPrintGroupName,
        False) is not None

    # test blacklist works
    assert Utils.GetLPID(
        workingPrintGroupName,
        'brokenalternative',
        True,
        [workingPrintGroupName, 'brokendefault', 'adm', 'wheel', 'root'],
        True) is None
def is_valid_groupname(groupname):
    """Check whether *groupname* resolves in the system group database."""
    try:
        grp.getgrnam(groupname)
    except KeyError:
        return False
    return True
def updateTable(self):
    """Repopulate the teachers table with every account whose primary
    group name appears in the 'teachers' group member list."""
    self.userList = pwd.getpwall()
    crow = -1
    self.tableTeachers.clearContents()
    self.tableTeachers.setRowCount(0)
    # NOTE(review): getgrnam raises KeyError when 'teachers' is missing;
    # this truthiness test does not guard against that -- confirm.
    if grp.getgrnam('teachers'):
        grp_users = grp.getgrnam('teachers').gr_mem
        if grp_users:
            for usr in self.userList:
                # Match on the name of the user's primary group.
                if grp.getgrgid(usr[3])[0] in grp_users:
                    #self.tableTeachers.setRowCount(100)
                    self.tableTeachers.setRowCount(self.tableTeachers.rowCount() + 1)
                    crow += 1
                    gecos = usr.pw_gecos.split(',')
                    # Full name (first GECOS field), read-only cell
                    itm = QtGui.QTableWidgetItem(gecos[0])
                    itm.setFlags(itm.flags() & ~QtCore.Qt.ItemIsEditable)
                    self.tableTeachers.setItem(crow, 0, itm)
                    # username
                    itm = QtGui.QTableWidgetItem(usr[0])
                    itm.setFlags(itm.flags() & ~QtCore.Qt.ItemIsEditable)
                    self.tableTeachers.setItem(crow, 1, itm)
    self.tableTeachers.sortByColumn(1, QtCore.Qt.AscendingOrder)
def usercheck(acc):
    """Ensure the FTP account described by *acc* exists with its
    directory layout; returns a log string of actions taken."""
    out = ""
    username = acc["name"]
    userroot = acc["root"]
    # Create the account (with a same-named group) only when missing.
    try:
        pwd.getpwnam(username)
    except KeyError:
        runscript("groupadd %s" % username)
        runscript("useradd -g %s -p %s %s" % (username, acc["FTPpass"], username))
        out += "Creating user for %s" % username
    uid = pwd.getpwnam(username).pw_uid
    gid = grp.getgrnam(username).gr_gid
    if not os.path.isdir(userroot):
        os.makedirs(userroot + "/htdocs")
        os.makedirs(userroot + "/logs")
        os.makedirs(userroot + "/backup")
        # logs/backup stay root:nogroup; htdocs belongs to the user.
        rootuid = pwd.getpwnam("root").pw_uid
        rootgid = grp.getgrnam("nogroup").gr_gid
        os.chown(userroot + "/htdocs", uid, gid)
        os.chown(userroot + "/logs", rootuid, rootgid)
        os.chown(userroot + "/backup", rootuid, rootgid)
        os.chmod(userroot + "/logs", 0775)
        os.chmod(userroot + "/backup", 0775)
        os.chmod(userroot + "/htdocs", 0775)
        out += "Creating folders for %s" % username
    # Re-assert ownership recursively in every case.
    chownR(userroot + "/htdocs", uid, gid)
    return out
def chown(self, path, user, group, recursive=False):
    """
    Chown a file
    @param path: the path of a file or a directory to be chown
    @type path: string
    @param user: username to be used as the new owner
    @type user: string
    @param group: groupname to be used as the new group owner (if None,
        then root is used as the groupname)
    @type group: string
    @param recursive: if path is a directory, all files underneath the
        path are also chown if True (default False)
    @type recursive: boolean
    """
    if not group:
        group = 'root'
    j.logger.log('Chown %s:%s %s' % (user, group, path), 8)
    uid = pwd.getpwnam(user).pw_uid
    # group can no longer be None here (defaulted to 'root' above); the
    # original duplicated this lookup in both branches of a dead if/else.
    gid = grp.getgrnam(group).gr_gid
    os.chown(path, uid, gid)
    if recursive:
        files = j.sal.fs.walk(path, return_folders=1, return_files=1, recurse=-1)
        for file in files:
            os.chown(file, uid, gid)
def test_groupdel(group_ops_setup):
    """A deleted group must disappear from NSS lookups."""
    group_ops_setup.groupadd(**GROUP1)
    ent.assert_group_by_name("group1", GROUP1)
    group_ops_setup.groupdel("group1")
    with pytest.raises(KeyError):
        grp.getgrnam("group1")
def sort_file(config, srcpath, dstpath, mediatype, action, infofile, shasum,
              chown, user, group, file_mode, directory_mode, metainfo_tag,
              dryrun):
    """Sort one media file (or, recursively, a directory of files) into its
    destination, optionally chowning, writing an info file and a shasum.

    Returns 0 on success/dry-run, 1 when no destination filename could be
    determined, otherwise the sort command's return code.
    """
    # Get UID and GID for chowning
    uid = pwd.getpwnam(user)[2]
    gid = grp.getgrnam(group)[2]

    logger(config, ">>> Parsing {}".format(srcpath))

    # Determine if srcpath is a directory, then act recursively
    if os.path.isdir(srcpath):
        for filename in sorted(os.listdir(srcpath)):
            child_filename = '{}/{}'.format(srcpath, filename)
            sort_file(config, child_filename, dstpath, mediatype, action,
                      infofile, shasum, chown, user, group, file_mode,
                      directory_mode, metainfo_tag, dryrun)
        return 0

    logger(config, "Sorting action: {}".format(action))

    # Get our destination path and filename (media-specific)
    # NOTE(review): if mediatype is neither 'tv' nor 'movie',
    # file_dst_filename is unbound below -- confirm callers validate it.
    if mediatype == 'tv':
        file_dst_path, file_dst_filename = sort_tv_file(config, srcpath, dstpath)
    if mediatype == 'movie':
        file_dst_path, file_dst_filename = sort_movie_file(config, srcpath, dstpath, metainfo_tag)
    if not file_dst_filename:
        return 1

    # Ensure our dst_path exists or create it
    if not os.path.isdir(file_dst_path) and not dryrun:
        logger(config, "Creating target directory '{}'".format(file_dst_path))
        os.makedirs(file_dst_path)
        if chown:
            os.chown(file_dst_path, uid, gid)
        os.chmod(file_dst_path, int(directory_mode, 8))

    file_dst = '{}/{}'.format(file_dst_path, file_dst_filename)

    if dryrun:
        # Make the output quoted
        srcpath = '"{}"'.format(srcpath)
        file_dst = '"{}"'.format(file_dst)

    # Perform our action
    if action == 'symlink':
        action_cmd = ['ln', '-s', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'hardlink':
        action_cmd = ['ln', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'copy':
        action_cmd = ['cp', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'move':
        action_cmd = ['mv', '{}'.format(srcpath), '{}'.format(file_dst)]
    if dryrun:
        logger(config, "Sort command: {}".format(' '.join(action_cmd)))
        return 0

    # Run the action
    logger(config, "Running sort action... ", nl=False)
    process = subprocess.run(action_cmd)
    retcode = process.returncode
    logger(config, "done.")
    if retcode != 0:
        return retcode

    # Create info file
    if infofile:
        logger(config, "Creating info file... ", nl=False)
        infofile_name = '{}.txt'.format(file_dst)
        infofile_contents = [
            "Source filename: {}".format(os.path.basename(srcpath)),
            "Source directory: {}".format(os.path.dirname(srcpath))
        ]
        with open(infofile_name, 'w') as fh:
            fh.write('\n'.join(infofile_contents))
            fh.write('\n')
        logger(config, "done.")

    # Create sha256sum file
    if shasum:
        logger(config, "Generating shasum file... ", nl=False)
        shasum_name = '{}.sha256sum'.format(file_dst)
        shasum_cmdout = subprocess.run(['sha256sum', '-b', '{}'.format(file_dst)],
                                       capture_output=True, encoding='utf8')
        shasum_data = shasum_cmdout.stdout
        if shasum_data:
            shasum_data = shasum_data.strip()
        with open(shasum_name, 'w') as fh:
            fh.write(shasum_data)
            fh.write('\n')
        logger(config, "done.")

    if chown:
        logger(config, "Correcting ownership and permissions... ", nl=False)
        os.chown(file_dst, uid, gid)
        os.chmod(file_dst, int(file_mode, 8))
        if infofile:
            os.chown(infofile_name, uid, gid)
            os.chmod(infofile_name, int(file_mode, 8))
        if shasum:
            os.chown(shasum_name, uid, gid)
            os.chmod(shasum_name, int(file_mode, 8))
        logger(config, "done.")

    return retcode
master_ip = cluster_config['nodes'][0] print( "Wazuh is running in cluster mode: {EXECUTABLE_NAME} is not available in worker nodes. " "Please, try again in the master node: {MASTER_IP}".format( EXECUTABLE_NAME=executable_name, MASTER_IP=master_ip)) sys.exit(1) if os.geteuid() != 0: print( "You need root privileges to run this script. Please try again, using 'sudo'. Exiting." ) sys.exit(1) try: root_uid = getpwnam("root").pw_uid ossec_gid = getgrnam("ossec").gr_gid except: sys.exit(1) # Arguments arguments = { 'source': 'download', 'restart': 'ask', 'backups': False, 'force': False, 'debug': False, 'json': False, 'branch-name': False, 'url': False } restart_args = 0
def makeService(config):
    """Build a MultiService hosting one server per entry of an
    inetd.conf-style file (Twisted inetd tap)."""
    s = appservice.MultiService()
    conf = inetdconf.InetdConf()
    with open(config['file']) as f:
        conf.parseFile(f)
    for service in conf.services:
        protocol = service.protocol
        if service.protocol.startswith('rpc/'):
            log.msg('Skipping rpc service due to lack of rpc support')
            continue
        if (protocol, service.socketType) not in [('tcp', 'stream'),
                                                  ('udp', 'dgram')]:
            log.msg('Skipping unsupported type/protocol: %s/%s'
                    % (service.socketType, service.protocol))
            continue
        # Convert the username into a uid (if necessary)
        try:
            service.user = int(service.user)
        except ValueError:
            try:
                service.user = pwd.getpwnam(service.user)[2]
            except KeyError:
                # NOTE(review): this region was corrupted in the source and
                # has been reconstructed from the standard Twisted inetd tap.
                log.msg('Unknown user: ' + service.user)
                continue
        # Convert the group name into a gid; when none was given, fall
        # back to the user's primary group.
        if service.group is None:
            service.group = pwd.getpwuid(service.user)[3]
        else:
            try:
                service.group = int(service.group)
            except ValueError:
                try:
                    service.group = grp.getgrnam(service.group)[2]
                except KeyError:
                    log.msg('Unknown group: ' + service.group)
                    continue
        if service.program == 'internal':
            if config['nointernal']:
                continue
            # Internal services can use a standard ServerFactory
            if service.name not in inetd.internalProtocols:
                log.msg('Unknown internal service: ' + service.name)
                continue
            factory = ServerFactory()
            factory.protocol = inetd.internalProtocols[service.name]
        else:
            factory = inetd.InetdFactory(service)
        if protocol == 'tcp':
            internet.TCPServer(service.port, factory).setServiceParent(s)
        elif protocol == 'udp':
            raise RuntimeError("not supporting UDP")
    return s
def run(self):
    """Instantiate *.template data files, substituting @@name@@ markers
    with the matching attributes of this command, then delegate to the
    stock install_data.run."""
    # remove /usr for bdist/bdist_rpm
    match = re.search('(build/[^/]+/dumb)/usr', self.install_dir)
    if match != None:
        self.install_dir = re.sub(match.group(0), match.group(1), self.install_dir)
    # remove /var/tmp/*-buildroot for bdist_rpm
    match = re.search('(/var/tmp/.*-buildroot)/usr', self.install_dir)
    if match != None:
        self.install_dir = re.sub(match.group(0), match.group(1), self.install_dir)
    # create tmp area
    tmpDir = 'build/tmp'
    self.mkpath(tmpDir)
    new_data_files = []
    for destDir, dataFiles in self.data_files:
        newFilesList = []
        for srcFile in dataFiles:
            # check extension
            if not srcFile.endswith('.template'):
                raise RuntimeError, "%s doesn't have the .template extension" % srcFile
            # dest filename
            destFile = re.sub('(\.exe)*\.template$', '', srcFile)
            destFile = re.sub(r'^templates/', '', destFile)
            destFile = '%s/%s' % (tmpDir, destFile)
            # open src
            inFile = open(srcFile)
            # read
            filedata = inFile.read()
            # close
            inFile.close()
            # replace patterns
            for item in re.findall('@@([^@]+)@@', filedata):
                if not hasattr(self, item):
                    raise RuntimeError, 'unknown pattern %s in %s' % (
                        item, srcFile)
                # get pattern
                patt = getattr(self, item)
                # remove install root, if any
                if self.root is not None and patt.startswith(self.root):
                    patt = patt[len(self.root):]
                # remove build/*/dump for bdist
                patt = re.sub('build/[^/]+/dumb', '', patt)
                # remove /var/tmp/*-buildroot for bdist_rpm
                patt = re.sub('/var/tmp/.*-buildroot', '', patt)
                # replace
                filedata = filedata.replace('@@%s@@' % item, patt)
            # write to dest
            if '/' in destFile:
                destSubDir = os.path.dirname(destFile)
                if not os.path.exists(destSubDir):
                    os.makedirs(destSubDir)
            oFile = open(destFile, 'w')
            oFile.write(filedata)
            oFile.close()
            # chmod for exe
            if srcFile.endswith('.exe.template'):
                commands.getoutput('chmod +x %s' % destFile)
            # append
            newFilesList.append(destFile)
        # replace dataFiles to install generated file
        new_data_files.append((destDir, newFilesList))
    # install
    self.data_files = new_data_files
    install_data_org.run(self)
#post install uid = pwd.getpwnam(panda_user).pw_uid gid = grp.getgrnam(panda_group).gr_gid for directory in [ '/var/log/panda', '/var/log/panda/wsgisocks', '/var/log/panda/fastsocks' ]: if not os.path.exists(directory): os.makedirs(directory) os.chown(directory, uid, gid)
def gid(self):
    """Numeric gid of ``self.group``, resolved via the system group
    database (raises KeyError when the group does not exist)."""
    entry = grp.getgrnam(self.group)
    return entry.gr_gid
#!/usr/bin/python import os from boto.ses.connection import SESConnection # AWS Credentials retrieval using encrypted C shared library ## import os, pwd, grp f = open('/tmp/.uwsgi.lock', 'w+') f.close() uid = pwd.getpwnam('oskar').pw_uid gid = grp.getgrnam('oskar').gr_gid os.chown('/tmp/.uwsgi.lock', uid, gid) import ctypes from ctypes import CDLL pylibc = CDLL("/home/ella/Ella/ella/awsenckeys.so") pylibc.awsakey.restype = ctypes.c_char_p pylibc.awsskey.restype = ctypes.c_char_p AWSAKEY = pylibc.awsakey() AWSSKEY = pylibc.awsskey() ###################################################### conn = SESConnection(AWSAKEY, AWSSKEY) data = conn.get_send_statistics() data = data["GetSendStatisticsResponse"]["GetSendStatisticsResult"] for i in data["SendDataPoints"]:
def create_db(self, user=None, group=None, mode=None, backup=False):
    """Create cert DB

    :param user: User owning the secdir
    :param group: Group owning the secdir
    :param mode: Mode of the secdir
    :param backup: Backup the secdir files
    """
    # Derive file/password-file modes from the requested dir mode,
    # falling back to conservative defaults.
    if mode is not None:
        dirmode = mode
        filemode = mode & 0o666
        pwdfilemode = mode & 0o660
    else:
        dirmode = 0o750
        filemode = 0o640
        pwdfilemode = 0o640

    # -1 leaves owner/group unchanged in os.chown below.
    uid = -1
    gid = -1
    if user is not None:
        uid = pwd.getpwnam(user).pw_uid
    if group is not None:
        gid = grp.getgrnam(group).gr_gid

    if backup:
        for filename in self.backup_filenames:
            ipautil.backup_file(filename)

    if not os.path.exists(self.secdir):
        os.makedirs(self.secdir, dirmode)

    if not os.path.exists(self.pwd_file):
        # Create the password file for this db
        with io.open(os.open(self.pwd_file,
                             os.O_CREAT | os.O_WRONLY,
                             pwdfilemode), 'w', closefd=True) as f:
            f.write(ipautil.ipa_generate_password())
            # flush and sync tempfile inode
            f.flush()
            os.fsync(f.fileno())

    # In case dbtype is auto, let certutil decide which type of DB
    # to create.
    if self.dbtype == 'auto':
        dbdir = self.secdir
    else:
        dbdir = '{}:{}'.format(self.dbtype, self.secdir)
    args = [
        paths.CERTUTIL,
        '-d', dbdir,
        '-N',
        '-f', self.pwd_file,
        # -@ in case it's an old db and it must be migrated
        '-@', self.pwd_file,
    ]
    ipautil.run(args, stdin=None, cwd=self.secdir)
    self._set_filenames(self._detect_dbtype())

    if self.filenames is None:
        # something went wrong...
        raise ValueError("Failed to create NSSDB at '{}'".format(
            self.secdir))

    # Finally fix up perms
    os.chown(self.secdir, uid, gid)
    os.chmod(self.secdir, dirmode)
    tasks.restore_context(self.secdir, force=True)
    for filename in self.filenames:
        if os.path.exists(filename):
            os.chown(filename, uid, gid)
            # The password file keeps its tighter mode.
            if filename == self.pwd_file:
                new_mode = pwdfilemode
            else:
                new_mode = filemode
            os.chmod(filename, new_mode)
            tasks.restore_context(filename, force=True)
def apply(self, item):
    """Apply a file resource: optionally render content, write the file,
    then set mode, owner and group on the target path.

    :param item: mapping whose ``item[self.provider]`` entry describes the
        file. Recognized keys: ``path``, ``mode`` (int), ``owner``
        (uid int or name str), ``group`` (gid int or name str),
        ``content`` (str), ``template`` (path) and ``vars`` (mapping).
    :raises Exception: on an invalid mode/owner/group value, a missing
        template, or when the target file does not exist afterwards.
    """
    data = item[self.provider]
    file_path = data["path"]

    # --- mode --------------------------------------------------------
    set_mode = False
    if "mode" in data:
        if isinstance(data["mode"], int):
            file_mode = data["mode"]
            # TODO check range
            #elif g+x u+s
        else:
            raise Exception("invalid mode")
        set_mode = True

    # --- owner -------------------------------------------------------
    change_owner = False
    if "owner" in data:
        # get user details by username or uid
        if isinstance(data["owner"], int):
            file_owner = getpwuid(data["owner"])
        elif isinstance(data["owner"], str):
            file_owner = getpwnam(data["owner"])
        else:
            raise Exception("invalid data type for user")
        change_owner = True

    # --- group -------------------------------------------------------
    change_group = False
    if "group" in data:
        # get group details by groupname or gid
        if isinstance(data["group"], int):
            # BUG FIX: was getpwgid() (undefined) — numeric GIDs are
            # resolved through the group database, not the passwd one.
            file_group = getgrgid(data["group"])
        elif isinstance(data["group"], str):
            # BUG FIX: the type check previously inspected data["owner"],
            # so a string group combined with a non-string owner misfired.
            file_group = getgrnam(data["group"])
        else:
            raise Exception("invalid data type for group")
        change_group = True

    # --- content -----------------------------------------------------
    write_contents_to_file = False
    if "content" in data:
        content = data["content"]
        write_contents_to_file = True
    elif "template" in data:
        template = data["template"]
        if not os.path.exists(template):
            raise Exception("template '{0}' not found".format(template))
        self.logger.debug("loading template '{0}'".format(template))
        with open(template, "r") as template_file:
            template = Template(template_file.read())
        if "vars" in data:
            template_vars = data["vars"]
        else:
            template_vars = {}
        content = template.substitute(template_vars)
        write_contents_to_file = True

    if write_contents_to_file:
        self.logger.debug("writing to file '{0}'".format(file_path))
        with open(file_path, "w") as outfile:
            outfile.write(content)

    # the file has to exist at this stage
    if not os.path.exists(file_path):
        raise Exception("file '{0}' not found.".format(file_path))

    # set mode
    if set_mode:
        self.logger.debug("setting mode to {0}".format(file_mode))
        os.chmod(file_path, file_mode)  # PermissionError

    # set owner and group
    if change_owner:
        self.logger.debug("setting owner of '{1}' to '{0}'".format(
            file_owner.pw_name, file_path))
        os.chown(file_path, file_owner.pw_uid, -1)  # change only the owner

    if change_group:
        self.logger.debug("setting group of '{1}' to '{0}'".format(
            file_group.gr_name, file_path))
        os.chown(file_path, -1, file_group.gr_gid)  # change only the group
    return
default='', help='the password used to login into TVHeadend') parser.add_argument('-R', dest='do_radio_epg', action='store_const', const=True, default=False, help='fetch EPG data for radio channels') args = parser.parse_args() logging.basicConfig(level=logging.DEBUG) if (args.daemonize): # switch user and do daemonization try: uid = pwd.getpwnam(args.as_user).pw_uid gid = grp.getgrnam(args.as_group).gr_gid except KeyError as exc: debug( 'Unable to find the user {0} and group {1} for daemonization'. format(args.as_user, args.as_group)) sys.exit(1) pid_fd = open(args.pid_filename, 'w') switch_user(uid, gid) # switch to syslog logging.basicConfig(stream=logging.handlers.SysLogHandler()) daemonize() else: pid_fd = open(args.pid_filename, 'w')
#!/usr/bin/env python import os import pwd import grp import sys if 0 != os.getuid(): print "%s : you must to be a root" % sys.argv[0] exit(1) # username:grpname ids = None uid = None gid = None if ':' in sys.argv[1]: ids = sys.argv[1].split(':') uid = pwd.getpwnam(ids[0]).pw_uid gid = grp.getgrnam(ids[1]).gr_gid else: uid = pwd.getpwnam(sys.argv[1]).pw_uid gid = os.stat(sys.argv[2]).st_gid os.chown(sys.argv[2], uid, gid)
def diagnose_env_linux():
    """
    Run diagnostics in the running environment. Returns `True` when everything
    is ok, otherwise `False`.
    """
    ret = True

    # Test log path exists before installing handler.
    if not os.path.isdir("/tmp"):
        logger.warning("could not find /tmp for logs")
    else:
        os.system("mkdir /tmp/topotests")
        # Log diagnostics to file so it can be examined later.
        # NOTE(review): fhandler is only bound on this branch; the
        # removeHandler() at the end assumes /tmp exists — TODO confirm.
        fhandler = logging.FileHandler(
            filename="/tmp/topotests/diagnostics.txt")
        fhandler.setLevel(logging.DEBUG)
        fhandler.setFormatter(
            logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s"))
        logger.addHandler(fhandler)

    logger.info("Running environment diagnostics")

    # Load configuration
    config = configparser.ConfigParser(defaults=tgen_defaults)
    pytestini_path = os.path.join(CWD, "../pytest.ini")
    config.read(pytestini_path)

    # Assert that we are running as root
    if os.getuid() != 0:
        logger.error("you must run topotest as root")
        ret = False

    # Assert that we have mininet
    if os.system("which mn >/dev/null 2>/dev/null") != 0:
        logger.error(
            "could not find mininet binary (mininet is not installed)")
        ret = False

    # Assert that we have iproute installed
    if os.system("which ip >/dev/null 2>/dev/null") != 0:
        logger.error("could not find ip binary (iproute is not installed)")
        ret = False

    # Assert that we have gdb installed
    if os.system("which gdb >/dev/null 2>/dev/null") != 0:
        logger.error("could not find gdb binary (gdb is not installed)")
        ret = False

    # Assert that FRR utilities exist
    frrdir = config.get("topogen", "frrdir")
    if not os.path.isdir(frrdir):
        logger.error("could not find {} directory".format(frrdir))
        ret = False
    else:
        # Missing frr user/group/frrvty membership only warns (or errors
        # for frrvty) — it does not flip ret.
        try:
            pwd.getpwnam("frr")[2]
        except KeyError:
            logger.warning('could not find "frr" user')
        try:
            grp.getgrnam("frr")[2]
        except KeyError:
            logger.warning('could not find "frr" group')
        try:
            if "frr" not in grp.getgrnam("frrvty").gr_mem:
                logger.error(
                    '"frr" user and group exist, but user is not under "frrvty"'
                )
        except KeyError:
            logger.warning('could not find "frrvty" group')
        for fname in [
            "zebra", "ospfd", "ospf6d", "bgpd", "ripd", "ripngd", "isisd",
            "pimd", "ldpd", "pbrd",
        ]:
            path = os.path.join(frrdir, fname)
            if not os.path.isfile(path):
                # LDPd is an exception
                if fname == "ldpd":
                    logger.info(
                        "could not find {} in {}".format(fname, frrdir) +
                        "(LDPd tests will not run)")
                    continue
                logger.warning(
                    "could not find {} in {}".format(fname, frrdir))
                ret = False
            else:
                # Capture zebra's version banner for the diagnostics log.
                if fname != "zebra":
                    continue
                os.system(
                    "{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path))

    # Test MPLS availability
    krel = platform.release()
    if topotest.version_cmp(krel, "4.5") < 0:
        logger.info(
            'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.
            format(krel))

    # Test for MPLS Kernel modules available
    # NOTE: "not X != 0" is equivalent to "X == 0", i.e. log when the
    # module is absent — convoluted but apparently intentional.
    if not topotest.module_present("mpls-router", load=False) != 0:
        logger.info(
            "LDPd tests will not run (missing mpls-router kernel module)")
    if not topotest.module_present("mpls-iptunnel", load=False) != 0:
        logger.info(
            "LDPd tests will not run (missing mpls-iptunnel kernel module)")

    # TODO remove me when we start supporting exabgp >= 4
    try:
        p = os.popen("exabgp -v")
        line = p.readlines()
        version = line[0].split()
        if topotest.version_cmp(version[2], "4") >= 0:
            logger.warning(
                "BGP topologies are still using exabgp version 3, expect failures"
            )
        p.close()
    # We want to catch all exceptions
    # pylint: disable=W0702
    except:
        logger.warning("failed to find exabgp or returned error")

    # After we logged the output to file, remove the handler.
    logger.removeHandler(fhandler)
    fhandler.close()

    return ret
if not settings.iptables or not os.path.exists(settings.iptables): sys.exit("The `iptables` binary is not available, eh?!") if os.getuid(): sys.exit("This utility is supposed to be ran as root.") if os.path.exists(settings.socket): os.remove(settings.socket) server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) server.bind(settings.socket) # Provide the correct file ownership and permission so Cuckoo can use it # from an unprivileged process, based on Sean Whalen's routetor. try: gr = grp.getgrnam(settings.group) except KeyError: sys.exit( "The group (`%s`) does not exist. Please define the group / user " "through which Cuckoo will connect to the rooter, e.g., " "./utils/rooter.py -g myuser" % settings.group) os.chown(settings.socket, 0, gr.gr_gid) os.chmod(settings.socket, stat.S_IRUSR | stat.S_IWUSR | stat.S_IWGRP) while True: command, addr = server.recvfrom(4096) try: obj = json.loads(command) except:
def set_directory_ownership(path, username, groupname):
    """Chown *path* to ``username:groupname`` — a no-op unless running as root."""
    if os.getuid() != 0:
        return
    owner_uid = pwd.getpwnam(username).pw_uid
    owner_gid = grp.getgrnam(groupname).gr_gid
    os.chown(path, owner_uid, owner_gid)
_setup_protocols(root) _setup_static(root, config.map) start_listening(site, config.map, logger) # switch uid and gid to configured user and group. if os.name == 'posix' and os.getuid() == 0: user = config.map['[global]'].get('user') group = config.map['[global]'].get('group') if user: import pwd import grp try: pw = pwd.getpwnam(user) uid = pw.pw_uid if group: gr = grp.getgrnam(group) gid = gr.gr_gid else: gid = pw.pw_gid gr = grp.getgrgid(gid) group = gr.gr_name except Exception, e: logger.error('Aborting; Unknown user or group: %s' % e) sys.exit(1) logger.info('switching to user %s (uid=%d) and group %s (gid=%d)' % (user, uid, group, gid)) os.setgid(gid) os.setuid(uid) else: logger.error( 'Aborting; You must define a user (and optionally a group) in the configuration file.'
def diagnose_env():
    """
    Run diagnostics in the running environment. Returns `True` when everything
    is ok, otherwise `False`.
    """
    ret = True

    # Test log path exists before installing handler.
    if not os.path.isdir('/tmp'):
        logger.warning('could not find /tmp for logs')
    else:
        os.system('mkdir /tmp/topotests')
        # Log diagnostics to file so it can be examined later.
        # NOTE(review): fhandler is only bound on this branch; the final
        # removeHandler() assumes /tmp exists — TODO confirm.
        fhandler = logging.FileHandler(
            filename='/tmp/topotests/diagnostics.txt')
        fhandler.setLevel(logging.DEBUG)
        fhandler.setFormatter(
            logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s'))
        logger.addHandler(fhandler)

    logger.info('Running environment diagnostics')

    # Load configuration (Python 2 ConfigParser API)
    config = ConfigParser.ConfigParser(tgen_defaults)
    pytestini_path = os.path.join(CWD, '../pytest.ini')
    config.read(pytestini_path)

    # Assert that we are running as root
    if os.getuid() != 0:
        logger.error('you must run topotest as root')
        ret = False

    # Assert that we have mininet
    if os.system('which mn >/dev/null 2>/dev/null') != 0:
        logger.error(
            'could not find mininet binary (mininet is not installed)')
        ret = False

    # Assert that we have iproute installed
    if os.system('which ip >/dev/null 2>/dev/null') != 0:
        logger.error('could not find ip binary (iproute is not installed)')
        ret = False

    # Assert that we have gdb installed
    if os.system('which gdb >/dev/null 2>/dev/null') != 0:
        logger.error('could not find gdb binary (gdb is not installed)')
        ret = False

    # Assert that FRR utilities exist
    frrdir = config.get('topogen', 'frrdir')
    hasfrr = False
    if not os.path.isdir(frrdir):
        logger.error('could not find {} directory'.format(frrdir))
        ret = False
    else:
        hasfrr = True
        # Missing frr user/group only warns; missing frrvty membership errors.
        try:
            pwd.getpwnam('frr')[2]
        except KeyError:
            logger.warning('could not find "frr" user')
        try:
            grp.getgrnam('frr')[2]
        except KeyError:
            logger.warning('could not find "frr" group')
        try:
            if 'frr' not in grp.getgrnam('frrvty').gr_mem:
                logger.error(
                    '"frr" user and group exist, but user is not under "frrvty"'
                )
        except KeyError:
            logger.warning('could not find "frrvty" group')
        for fname in [
            'zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', 'isisd',
            'pimd', 'ldpd'
        ]:
            path = os.path.join(frrdir, fname)
            if not os.path.isfile(path):
                # LDPd is an exception
                if fname == 'ldpd':
                    logger.info(
                        'could not find {} in {}'.format(fname, frrdir) +
                        '(LDPd tests will not run)')
                    continue
                logger.warning('could not find {} in {}'.format(fname, frrdir))
                ret = False
            else:
                # Capture zebra's version banner for the diagnostics log.
                if fname != 'zebra':
                    continue
                os.system(
                    '{} -v 2>&1 >/tmp/topotests/frr_zebra.txt'.format(path))

    # Assert that Quagga utilities exist
    quaggadir = config.get('topogen', 'quaggadir')
    if hasfrr:
        # if we have frr, don't check for quagga
        pass
    elif not os.path.isdir(quaggadir):
        logger.info(
            'could not find {} directory (quagga tests will not run)'.format(
                quaggadir))
    else:
        # NOTE(review): ret is reset to True here, discarding earlier
        # failures — looks suspicious but preserved as-is.
        ret = True
        try:
            pwd.getpwnam('quagga')[2]
        except KeyError:
            logger.info('could not find "quagga" user')
        try:
            grp.getgrnam('quagga')[2]
        except KeyError:
            logger.info('could not find "quagga" group')
        try:
            if 'quagga' not in grp.getgrnam('quaggavty').gr_mem:
                logger.error(
                    '"quagga" user and group exist, but user is not under "quaggavty"'
                )
        except KeyError:
            logger.warning('could not find "quaggavty" group')
        for fname in [
            'zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', 'isisd',
            'pimd'
        ]:
            path = os.path.join(quaggadir, fname)
            if not os.path.isfile(path):
                logger.warning('could not find {} in {}'.format(
                    fname, quaggadir))
                ret = False
            else:
                if fname != 'zebra':
                    continue
                os.system(
                    '{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt'.format(path))

    # Test MPLS availability
    krel = platform.release()
    if topotest.version_cmp(krel, '4.5') < 0:
        logger.info(
            'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.
            format(krel))

    # Test for MPLS Kernel modules available (modprobe -n: dry run)
    if os.system('/sbin/modprobe -n mpls-router') != 0:
        logger.info(
            'LDPd tests will not run (missing mpls-router kernel module)')
    if os.system('/sbin/modprobe -n mpls-iptunnel') != 0:
        logger.info(
            'LDPd tests will not run (missing mpls-iptunnel kernel module)')

    # TODO remove me when we start supporting exabgp >= 4
    try:
        output = subprocess.check_output(['exabgp', '-v'])
        line = output.split('\n')[0]
        version = line.split(' ')[2]
        if topotest.version_cmp(version, '4') >= 0:
            logger.warning(
                'BGP topologies are still using exabgp version 3, expect failures'
            )
    # We want to catch all exceptions
    # pylint: disable=W0702
    except:
        logger.warning('failed to find exabgp or returned error')

    # After we logged the output to file, remove the handler.
    logger.removeHandler(fhandler)

    return ret
def mod_run_check(cmd_kwargs, onlyif, unless, group, creates):
    '''
    Execute the onlyif and unless logic.
    Return a result dict if:
    * group is not available
    * onlyif failed (onlyif != 0)
    * unless succeeded (unless == 0)
    else return True
    '''
    # never use VT for onlyif/unless executions because this will lead
    # to quote problems
    # deepcopy so the caller's kwargs are not mutated by the use_vt override
    cmd_kwargs = copy.deepcopy(cmd_kwargs)
    cmd_kwargs['use_vt'] = False
    if group and HAS_GRP:
        try:
            egid = grp.getgrnam(group).gr_gid
            # In test mode we only validate that the group exists; we do
            # not actually switch egid.
            if not __opts__['test']:
                os.setegid(egid)
        except KeyError:
            return {'comment': 'The group {0} is not available'.format(group),
                    'result': False}
    if onlyif is not None:
        if isinstance(onlyif, string_types):
            cmd = __salt__['cmd.retcode'](onlyif,
                                          ignore_retcode=True,
                                          python_shell=True,
                                          **cmd_kwargs)
            log.debug('Last command return code: {0}'.format(cmd))
            if cmd != 0:
                return {'comment': 'onlyif execution failed',
                        'skip_watch': True,
                        'result': True}
        elif isinstance(onlyif, list):
            # Every entry must succeed; first non-zero short-circuits.
            for entry in onlyif:
                cmd = __salt__['cmd.retcode'](entry,
                                              ignore_retcode=True,
                                              python_shell=True,
                                              **cmd_kwargs)
                log.debug('Last command return code: {0}'.format(cmd))
                if cmd != 0:
                    return {'comment': 'onlyif execution failed',
                            'skip_watch': True,
                            'result': True}
        elif not isinstance(onlyif, string_types):
            # Non-string, non-list: treat as a plain truthiness gate.
            if not onlyif:
                log.debug('Command not run: onlyif did not evaluate to string_type')
                return {'comment': 'onlyif execution failed',
                        'skip_watch': True,
                        'result': True}
    if unless is not None:
        if isinstance(unless, string_types):
            cmd = __salt__['cmd.retcode'](unless,
                                          ignore_retcode=True,
                                          python_shell=True,
                                          **cmd_kwargs)
            log.debug('Last command return code: {0}'.format(cmd))
            if cmd == 0:
                return {'comment': 'unless execution succeeded',
                        'skip_watch': True,
                        'result': True}
        elif isinstance(unless, list):
            # Skip only when *all* unless commands succeed.
            cmd = []
            for entry in unless:
                cmd.append(__salt__['cmd.retcode'](entry,
                                                   ignore_retcode=True,
                                                   python_shell=True,
                                                   **cmd_kwargs))
                log.debug('Last command return code: {0}'.format(cmd))
            if all([c == 0 for c in cmd]):
                return {'comment': 'unless execution succeeded',
                        'skip_watch': True,
                        'result': True}
        elif not isinstance(unless, string_types):
            if unless:
                log.debug('Command not run: unless did not evaluate to string_type')
                return {'comment': 'unless execution succeeded',
                        'skip_watch': True,
                        'result': True}
    # creates: skip when the named file(s) already exist.
    if isinstance(creates, string_types) and os.path.exists(creates):
        return {'comment': '{0} exists'.format(creates), 'result': True}
    elif isinstance(creates, list) and all([
        os.path.exists(path) for path in creates
    ]):
        return {'comment': 'All files in creates exist', 'result': True}
    # No reason to stop, return True
    return True
#!/usr/bin/python
# Publish an availability flag for Consul and hand the file over to the
# consul service account.
import os
import pwd
import grp
import json

# DEFINE VARIABLES
flag_path = "/etc/consul.d/filament_bool"
flag_payload = {"is_available": "True"}

consul_uid = pwd.getpwnam('consul').pw_uid
consul_gid = grp.getgrnam('consul').gr_gid

# MAKE AVAILABLE
with open(flag_path, 'w') as flag_file:
    json.dump(flag_payload, flag_file)
os.chown(flag_path, consul_uid, consul_gid)
def ossec_gid():
    """Return the module-level cached ossec GID, falling back to a group
    database lookup when the cache is unset (None)."""
    cached = globals()['_ossec_gid']
    if cached is None:
        return getgrnam("ossec").gr_gid
    return cached
def gid(self):
    """Resolve ``self.user`` (used as a group name) to its numeric GID."""
    group_entry = grp.getgrnam(self.user)
    return group_entry.gr_gid
except: e = sys.exc_info()[0] print "Unrecognised exception occured, was unable to create template (returned %s), terminating..." % e sys.exit(0) # This should be a return 0 to prevent the container from restarting. # Change owner and group try: template_list[template_item]['uid'] = pwd.getpwnam(template_list[template_item]['user']).pw_uid except KeyError as e: errormsg = "The user %s does not exist for template %s" % template_list[template_item]['user'], template_item errormsg += "(returned %s), terminating..." % e print errormsg sys.exit(0) # This should be a return 0 to prevent the container from restarting try: template_list[template_item]['gid'] = grp.getgrnam(template_list[template_item]['group']).gr_gid except KeyError as e: errormsg = "The group %s does not exist for template %s" % template_list[template_item]['group'], template_item errormsg += "(returned %s), terminating..." % e print errormsg sys.exit(0) # This should be a return 0 to prevent the container from restarting try: os.chown(template_list[template_item]['path'], template_list[template_item]['uid'], template_list[template_item]['gid']) except OSError as e: errormsg = "The file %s could not be chowned for template" % template_list[template_item]['path'] errormsg += " %s (returned %s), terminating..." % template_item, e print errormsg sys.exit(0) # This should be a return 0 to prevent the container from restarting
def _restore_account_info(acctName, thisUID, acctGroup, thisGID, acctGECOS,
                          acctDir, acctShell, gr_mem, supGroup, supGID):
    """Recreate a previously removed account: its primary group, the user
    itself, an optional supplemental group, and its group memberships.

    Returns False when a required groupadd/useradd step fails, True otherwise.
    """
    logger = TCSLogger.TCSLogger.getInstance()
    msg = ""
    # Reuse the existing group if present; warn when its GID differs from
    # the GID we recorded before removal.
    try:
        new_gid = int(grp.getgrnam(acctGroup)[2])
        if new_gid != thisGID:
            msg = "Newly added GID does not match requested GID, files restored using requested GID"
            logger.log_notice(
                'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
    except KeyError:
        # Group missing: recreate it with the recorded GID
        # (-r = system group on Linux; Solaris groupadd lacks -r).
        if sb_utils.os.info.is_solaris() == True:
            cmd = "/usr/sbin/groupadd -g %s %s" % (str(thisGID), acctGroup)
        else:
            cmd = "/usr/sbin/groupadd -r -g %s %s" % (str(thisGID), acctGroup)
        output_tuple = tcs_utils.tcs_run_cmd(cmd, True)
        if output_tuple[0] != 0:
            msg = "Attempt to add '%s' group account failed: %s" % (
                acctGroup, output_tuple[2])
            logger.log_err(
                'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
            return False
    #
    # Re-read the group entry so we have its struct for useradd -g.
    # NOTE(review): output_tuple is unbound here when the first lookup
    # succeeded but this one fails — TODO confirm upstream intent.
    try:
        ftp_grp_struct = grp.getgrnam(acctGroup)
    except KeyError:
        msg = "Attempt to add '%s' group account failed: %s" % (
            acctGroup, output_tuple[2])
        logger.log_err(
            'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
        return False
    # Recreate the user (-M on Linux: don't create the home directory).
    if sb_utils.os.info.is_solaris() == True:
        cmd = "/usr/sbin/useradd -u %s -g %d -s '%s' -d %s -c '%s' %s " % (
            str(thisUID), ftp_grp_struct.gr_gid, acctShell, acctDir,
            acctGECOS, acctName)
    else:
        cmd = "/usr/sbin/useradd -u %s -g %d -s '%s' -M -d %s -c '%s' %s" % (
            str(thisUID), ftp_grp_struct.gr_gid, acctShell, acctDir,
            acctGECOS, acctName)
    output_tuple = tcs_utils.tcs_run_cmd(cmd, True)
    if output_tuple[0] != 0:
        msg = "Attempt to add '%s' account failed: %s" % (acctName,
                                                          output_tuple[2])
        logger.log_err(
            'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
        return False
    # Optionally recreate a supplemental group; failures here only warn.
    if supGroup != None and supGID != None:
        if sb_utils.os.info.is_solaris() == True:
            cmd = "/usr/sbin/groupadd -g %s %s" % (str(supGID), supGroup)
        else:
            cmd = "/usr/sbin/groupadd -r -g %s %s" % (str(supGID), supGroup)
        output_tuple = tcs_utils.tcs_run_cmd(cmd, True)
        if output_tuple[0] != 0:
            msg = "Attempt to add '%s' supplimental group account failed: %s" % (
                supGroup, output_tuple[2])
            logger.log_warn(
                'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
        #
        try:
            ftp_grp_struct = grp.getgrnam(supGroup)
        except KeyError:
            msg = "Attempt to add '%s' supplimental group account failed: %s" % (
                supGroup, output_tuple[2])
            logger.log_warn(
                'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
    if gr_mem != []:
        # get a list of all groups, so we can verify that all of the groups that this user was a member of still exist
        all_group = [gr.gr_name for gr in grp.getgrall()]
        grp_to_append = []
        grp_missing = []
        for gr in gr_mem:
            if gr in all_group:
                grp_to_append.append(gr)
            else:
                grp_missing.append(gr)
        if grp_missing:
            msg = "Unable to add user '%s' to missing groups %s" % (
                acctName, ','.join(grp_missing))
            logger.log_warn(
                'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
        if grp_to_append:
            msg = "Adding user '%s' to following groups %s" % (
                acctName, ','.join(grp_to_append))
            cmd = "/usr/sbin/usermod -G %s %s" % (','.join(grp_to_append),
                                                  acctName)
            output_tuple = tcs_utils.tcs_run_cmd(cmd, True)
            if output_tuple[0] != 0:
                msg = "Unable to add '%s' to one or more groups: %s" % (
                    acctName, output_tuple[2])
                logger.log_warn(
                    'sb_utils.acctmgt.acctfiles._restore_account_info', msg)
    return True
def do_setup(self, *args):
    """Run Qubes firstboot configuration: resolve the 'qubes' group/user,
    then configure kernel, templates, dom0, networking and USB handling.
    Accumulated per-stage errors are raised as one combined Exception and
    shown via showErrorMessage(); the thread dialog is always closed.
    """
    try:
        self.qubes_gid = grp.getgrnam('qubes').gr_gid
        qubes_users = grp.getgrnam('qubes').gr_mem
        # Need at least one member of the 'qubes' group to own default VMs.
        if len(qubes_users) < 1:
            self.showErrorMessage(
                _("You must create a user account to create default VMs."))
            return
        else:
            self.qubes_user = qubes_users[0]
        # Advanced mode: user takes over configuration manually.
        if self.check_advanced.get_active():
            return
        errors = []
        os.setgid(self.qubes_gid)
        os.umask(0o0007)
        self.configure_default_kernel()
        # Finish template(s) installation, because it wasn't fully possible
        # from anaconda (it isn't possible to start a VM there).
        # This is specific to firstboot, not general configuration.
        for template in os.listdir('/var/lib/qubes/vm-templates'):
            try:
                self.configure_template(
                    template, '/var/lib/qubes/vm-templates/' + template)
            except Exception as e:
                errors.append((self.stage, str(e)))
        self.configure_dom0()
        self.configure_default_template()
        self.configure_qubes()
        if self.choice_network.get_selected():
            self.configure_network()
        if self.choice_usb.get_selected(
        ) and not self.choice_usb_with_net.get_selected():
            # Workaround for #1464 (so qvm.start from salt can't be used)
            self.run_command(
                ['systemctl', 'start', '*****@*****.**'])
        try:
            self.configure_default_dvm()
        except Exception as e:
            errors.append((self.stage, str(e)))
        # Surface all collected stage failures at once.
        if errors:
            msg = ""
            for (stage, error) in errors:
                msg += "{} failed:\n{}\n\n".format(stage, error)
            raise Exception(msg)
    except Exception as e:
        self.showErrorMessage(str(e))
    finally:
        # Always release the UI dialog, success or failure.
        self.thread_dialog.done()
        self.done = True
def _parse_group(group): try: return grp.getgrnam(group).gr_gid except KeyError: pass return int(group)
def removeSysAcct(sysAcctName=None, extraDirs=None, extraFiles=None):
    """Delete a system account after recording everything needed to undo it.

    Snapshots the passwd/group info and neutralizes ownership/perms of the
    account's files (root:root, mode 0000), then runs userdel. Returns a
    dict with 'acctinfo' and 'filelist' on success, or None on failure
    (reverting file changes when userdel fails).
    """
    logger = TCSLogger.TCSLogger.getInstance()
    if sysAcctName == None:
        msg = "Unable to remove account, no user name given to remove"
        logger.log_err('sb_utils.acctmgt.files.removeSysAcct', msg)
        return None
    # Normalize extraFiles/extraDirs: whitespace-separated string -> list,
    # anything else non-list -> empty list.
    if type(extraFiles) == type(""):
        extraFiles = extraFiles.split()
    elif type(extraFiles) != type([]):
        extraFiles = []
    if type(extraDirs) == type(""):
        extraDirs = extraDirs.split()
    elif type(extraDirs) != type([]):
        extraDirs = []
    # get info to rebuild account
    try:
        pwent = pwd.getpwnam(sysAcctName)
    except KeyError:
        msg = "Unable to remove account '%s', no such user" % sysAcctName
        logger.log_err('sb_utils.acctmgt.acctfiles.removeSysAcct', msg)
        return None
    # A same-named group may or may not exist; remember which.
    try:
        grent = grp.getgrnam(sysAcctName)
        uname_gname_match = True
    except KeyError:
        grent = None
        msg = "No matching group name for '%s'" % sysAcctName
        logger.log_info('sb_utils.acctmgt.acctfiles.removeSysAcct', msg)
        uname_gname_match = False
    acct_user = pwent.pw_uid
    acct_gid = pwent.pw_gid
    acct_gecos = pwent.pw_gecos
    acct_dir = pwent.pw_dir
    acct_shell = pwent.pw_shell
    primarygrent = grp.getgrgid(acct_gid)
    acct_group = primarygrent.gr_name
    # Record every group this user is a supplemental member of.
    grp_mem = []
    for gr in grp.getgrall():
        if sysAcctName in gr.gr_mem:
            grp_mem.append(gr.gr_name)
    account_record = {
        'uname': sysAcctName,
        'uid': acct_user,
        'gname': acct_group,
        'gid': acct_gid,
        'gecos': acct_gecos,
        'homedir': acct_dir,
        'shell': acct_shell,
        'gr_mem': grp_mem
    }
    # Corner case: detect accounts like 'games', where there is a username 'games', and a group 'games'
    # but the primary group of the user does *not* match the group name. Otherwise when we delete the
    # user account the *group* will also be delete, but we won't be able to restore things correctly
    # when we undo what we did. So in the case where there are both a user and group with this name,
    # but the user primary GID is not that of the group (ie GID(user) != GID(group) where user==group)
    # we need supplemental info to recreate the group (if it didn't exists later).
    if sysAcctName != acct_group and grent != None:
        account_record['supgname'] = grent.gr_name
        account_record['supgid'] = grent.gr_gid
    change_attrs = {
        'owner': 'root',
        'group': 'root',
        'if_user_is': sysAcctName,
        'dacs': '0000'
    }
    # Do we have a group whose name *matches* this host name? If so, then pass that through as well to match for ownership
    # as some files may be owned by either by the uid or gid matching this account name. I really don't want to have to
    # implement different behaviors for RH(4/5), SUSE11, and Fed(10/11/12/...)
    if uname_gname_match == True:
        change_attrs.update({'if_group_is': sysAcctName})
    # add potential mail, spool, and other stuff in /var files if not already in the extrafiles fields
    for spoolbasedir in ["/var/spool", "/var/mail", "/var/log", "/var/run"]:
        spooldir = "%s/%s" % (spoolbasedir, sysAcctName)
        if spooldir not in extraDirs:
            extraDirs.append(spooldir)
    # alter file ownerships..
    msg = "Changing ownership of files owned by '%s' in the home account (%s) to root:root, with perms of 000." % (
        sysAcctName, acct_dir)
    logger.log_info('sb_utils.acctmgt.acctfiles.removeSysAcct', msg)
    options = {'recursive': True, 'checkOnly': False, 'exactDACs': True}
    file_changes = sb_utils.file.fileperms.change_file_attributes(
        pathname=acct_dir, changes=change_attrs, options=options)
    for extraDir in extraDirs:
        if extraDir == acct_dir:
            continue
        thisrec = sb_utils.file.fileperms.change_file_attributes(
            pathname=extraDir, changes=change_attrs, options=options)
        if thisrec != None:
            file_changes.update(thisrec)
    # Ok, regardless now of whether or not a file is or is not a directory, do *not* recurse for extrafiles
    options['recursive'] = False
    for extraFile in extraFiles:
        if extraFile == acct_dir:
            continue
        thisrec = sb_utils.file.fileperms.change_file_attributes(
            pathname=extraFile, changes=change_attrs, options=options)
        if thisrec != None:
            file_changes.update(thisrec)
    # go delete the account itself
    cmd = "/usr/sbin/userdel %s" % sysAcctName
    output_tuple = [0]
    output_tuple = tcs_utils.tcs_run_cmd(cmd, True)
    if output_tuple[0] != 0:
        msg = 'Attempt to delete %s account failed.' % sysAcctName
        logger.log_err('sb_utils.acctmgt.acctfiles.removeSysAcct',
                       'Apply Error: ' + msg)
        msg = 'Reverting file renames'
        logger.log_err('sb_utils.acctmgt.acctfiles.removeSysAcct',
                       'Apply Error: ' + msg)
        # we have an exact set, so call directly in to alter the files
        sb_utils.file.fileperms.change_bulk_file_attributes(file_changes)
        return None
    return {'acctinfo': account_record, 'filelist': file_changes}
if not os.path.exists("/".join(["/proc", str(pid), "cmdline"])): # Pid is not real os.unlink(options.pidfile) pid = None print >> sys.stderr, ( "WARN: Bogus pid file was found. I deleted it.") else: print >> sys.stderr, ( "ERROR: Pidfile exists. Server already running?") sys.exit(1) # Get final GIDs if os.name != 'nt': if options.group is not None: gid = grp.getgrnam(options.group).gr_gid elif len(config['server']['group']): gid = grp.getgrnam(config['server']['group']).gr_gid # Get final UID if os.name != 'nt': if options.user is not None: uid = pwd.getpwnam(options.user).pw_uid elif len(config['server']['user']): uid = pwd.getpwnam(config['server']['user']).pw_uid # Fix up pid permissions if not options.foreground and not options.collector: # Write pid file pid = str(os.getpid()) try:
def __init__(self, rootObj):
    """Build the mount plan for a mock chroot: essential /proc, /sys,
    /dev/shm and /dev/pts mounts, plus holders for managed/user mounts.
    Nothing is mounted here; only MountPoint objects are prepared.
    """
    self.rootObj = rootObj
    self.essential_mounts = []  # /proc, /sys ... normally managed by systemd
    self.managed_mounts = []  # mounts owned by mock
    self.user_mounts = []  # mounts injected by user
    # NOTE(review): duplicate re-initialization of essential_mounts,
    # preserved as-is.
    self.essential_mounts = []

    # Instead of mounting a fresh procfs and sysfs, we bind mount /proc
    # and /sys. This avoids problems with kernel restrictions if running
    # within a user namespace, and is pretty much identical otherwise.
    # The bind mounts additionally need to be recursive, because the
    # kernel forbids mounts that might reveal parts of the filesystem
    # that a container runtime overmounted to hide from the container
    # (rhbz#1745048).
    for mount in ['proc', 'sys']:
        mount_point = "/" + mount
        device = 'mock_hide_{}fs_from_host'.format(mount)
        host_path = rootObj.make_chroot_path(mount_point)
        self.essential_mounts += [
            # The recursive mount point needs to be later lazy umounted and
            # it would affect hosts's counterpart sub-mounts as well. To
            # avoid this, we need to make the mount point and parent mount
            # point private in unshare()d namespace. But since the parent
            # mount point of /sys and /proc so far was plain '/' mount (and
            # we need to keep that one shared, to keep LVM/tmpfs features
            # working) we crate a new parent mount for the final mountpoint
            # on the same path. So the mount graph looks like:
            # / (shared) -> /sys (private) -> /sys (recursive, private)
            #
            # Acknowledgement, IOW: We mount on host_path twice and it is
            # expected. This is because when you umount 'rprivate' mount
            # then parent mount point is notified .. so first we mount tmpfs
            # stub which we actually never use -- but is private -- and only
            # then we mount above the actual mount point. This prevents
            # from umount events to propagate to host from chroot.
            FileSystemMountPoint(filetype='tmpfs',
                                 device=device,
                                 path=host_path,
                                 options="rprivate"),
            BindMountPoint(srcpath=mount_point,
                           bindpath=host_path,
                           recursive=True,
                           options="nodev,noexec,nosuid,readonly,rprivate"),
        ]

    if rootObj.config['internal_dev_setup']:
        self.essential_mounts.append(
            FileSystemMountPoint(
                filetype='tmpfs',
                device='mock_chroot_shmfs',
                path=rootObj.make_chroot_path('/dev/shm')
            )
        )
        # devpts options: owning gid = host 'tty' group; newinstance is
        # only supported on kernels >= 2.6.29.
        opts = 'gid=%d,mode=0620,ptmxmode=0666' % grp.getgrnam('tty').gr_gid
        if util.cmpKernelVer(os.uname()[2], '2.6.29') >= 0:
            opts += ',newinstance'
        self.essential_mounts.append(
            FileSystemMountPoint(
                filetype='devpts',
                device='mock_chroot_devpts',
                path=rootObj.make_chroot_path('/dev/pts'),
                options=opts
            )
        )
    # True only when every essential mount is already in place.
    self.essential_mounted = all(m.ismounted()
                                 for m in self.essential_mounts)
try: Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2] except KeyError, error: Print("Exception obtaining gid from group name " + fc.Group + " Error: " + str(error), file=sys.stderr) LG().Log('ERROR', "Exception obtaining gid from group name " + fc.Group + " Error: " + str(error)) return False if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]: return False elif SourcePath: # Owner wasn't specified, if SourcePath is specified then check that the Owners match if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]: return False if fc.Group: try: Specified_Group_ID = grp.getgrnam(fc.Group)[2] except KeyError, error: Print("Exception obtaining gid from group name " + fc.Group + " Error: " + str(error), file=sys.stderr) LG().Log('ERROR', "Exception obtaining gid from group name " + fc.Group + " Error: " + str(error)) return False if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]: return False elif SourcePath: # Group wasn't specified, if SourcePath is specified then check that the Groups match if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]: return False # Mode is irrelevant to symlinks if not os.path.islink(DestinationPath): if fc.Mode: if str(oct(stat_info.st_mode))[-3:] != fc.Mode: return False
def list_staff(group='ocfstaff'):
    """Return a list of staff members.

    :param group: UNIX group to use to determine if someone is a staff member.
    """
    group_entry = grp.getgrnam(group)
    return group_entry.gr_mem
class DirSheet(Sheet):
    'Sheet displaying directory, using ENTER to open a particular file. Edited fields are applied to the filesystem.'
    rowtype = 'files'  # rowdef: (Path, stat)
    commands = [
        Command(ENTER, 'vd.push(openSource(cursorRow[0]))', 'open current file as a new sheet', 'sheet-open-row'),
        Command('g' + ENTER, 'for r in selectedRows: vd.push(openSource(r[0].resolve()))', 'open selected files as new sheets', 'sheet-open-rows'),
        Command('^O', 'launchEditor(cursorRow[0].resolve())', 'open current file in external $EDITOR', 'edit-row-external'),
        Command('g^O', 'launchEditor(*(r[0].resolve() for r in selectedRows))', 'open selected files in external $EDITOR', 'edit-rows-external'),
        Command('^S', 'save()', 'apply all changes on all rows', 'sheet-specific-apply-edits'),
        Command('z^S', 'save(cursorRow)', 'apply changes to current row', 'sheet-specific-apply-edits'),
        Command('z^R', 'undoMod(cursorRow); restat(cursorRow)', 'undo pending changes to current row', 'sheet-specific-apply-edits'),
        Command(
            'modify-delete-row',
            'if cursorRow not in toBeDeleted: toBeDeleted.append(cursorRow); cursorRowIndex += 1'
        ),
        Command('modify-delete-selected', 'deleteFiles(selectedRows)')
    ]
    columns = [
        # these setters all either raise or return None, so this is a non-idiomatic 'or' to squeeze in a restat
        DeferredSetColumn(
            'directory',
            getter=lambda col, row: row[0].parent.relpath(col.sheet.source.resolve()),
            setter=lambda col, row, val: col.sheet.moveFile(row, val)),
        DeferredSetColumn(
            'filename',
            getter=lambda col, row: row[0].name + row[0].ext,
            setter=lambda col, row, val: col.sheet.renameFile(row, val)),
        Column(
            'ext',
            getter=lambda col, row: row[0].is_dir() and '/' or row[0].suffix),
        DeferredSetColumn(
            'size', type=int,
            getter=lambda col, row: row[1].st_size,
            setter=lambda col, row, val: os.truncate(row[0].resolve(), int(val))),
        DeferredSetColumn(
            'modtime', type=date,
            getter=lambda col, row: row[1].st_mtime,
            setter=lambda col, row, val: os.utime(row[0].resolve(), times=((row[1].st_atime, float(val))))),
        DeferredSetColumn(
            'owner', width=0,
            getter=lambda col, row: pwd.getpwuid(row[1].st_uid).pw_name,
            setter=lambda col, row, val: os.chown(row[0].resolve(), pwd.getpwnam(val).pw_uid, -1)),
        DeferredSetColumn(
            'group', width=0,
            getter=lambda col, row: grp.getgrgid(row[1].st_gid).gr_name,
            # BUGFIX: grp.struct_group exposes gr_gid; .pw_gid is a pwd attribute,
            # so the old setter raised AttributeError whenever a group was edited.
            setter=lambda col, row, val: os.chown(row[0].resolve(), -1, grp.getgrnam(val).gr_gid)),
        DeferredSetColumn(
            'mode', width=0, type=int, fmtstr='{:o}',
            getter=lambda col, row: row[1].st_mode),
        Column(
            'filetype', width=40, cache=True,
            getter=lambda col, row: subprocess.Popen(
                ['file', '--brief', row[0].resolve()],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()[0].strip()),
    ]
    colorizers = [
        Colorizer('cell', 4, lambda s, c, r, v: s.colorOwner(s, c, r, v)),
        Colorizer(
            'cell', 8, lambda s, c, r, v: options.color_change_pending
            if s.changed(c, r) else None),
        Colorizer(
            'row', 9, lambda s, c, r, v: options.color_delete_pending
            if r in s.toBeDeleted else None),
    ]
    nKeys = 2

    @staticmethod
    def colorOwner(sheet, col, row, val):
        """Return a color name for the owner/group cells based on the file's
        permission bits: green for writable, yellow for readable, bolded when
        executable; None (implicitly) otherwise."""
        path, st = row
        mode = st.st_mode
        ret = ''
        if col.name == 'group':
            if mode & stat.S_IXGRP:
                ret = 'bold '
            if mode & stat.S_IWGRP:
                return ret + 'green'
            if mode & stat.S_IRGRP:
                return ret + 'yellow'
        elif col.name == 'owner':
            if mode & stat.S_IXUSR:
                ret = 'bold '
            if mode & stat.S_IWUSR:
                return ret + 'green'
            if mode & stat.S_IRUSR:
                return ret + 'yellow'

    def changed(self, col, row):
        """True if `row` has a pending (uncommitted) edit in `col`."""
        return isinstance(col, DeferredSetColumn) and col.changed(row)

    def deleteFiles(self, rows):
        """Mark `rows` for deletion on the next save()."""
        for r in rows:
            if r not in self.toBeDeleted:
                self.toBeDeleted.append(r)

    def moveFile(self, row, val):
        """Move the file in `row` into directory `val` (relative paths are
        resolved against this sheet's source directory), creating the
        destination directory if needed."""
        fn = row[0].name + row[0].ext
        newpath = os.path.join(val, fn)
        if not newpath.startswith('/'):
            newpath = os.path.join(self.source.resolve(), newpath)

        parent = Path(newpath).parent
        if parent.exists():
            if not parent.is_dir():
                error('destination %s not a directory' % parent)
        else:
            with contextlib.suppress(FileExistsError):
                os.makedirs(parent.resolve())

        os.rename(row[0].resolve(), newpath)
        row[0] = Path(newpath)
        self.restat(row)

    def renameFile(self, row, val):
        """Rename the file in `row` to `val` within its current directory."""
        newpath = row[0].with_name(val)
        os.rename(row[0].resolve(), newpath.resolve())
        row[0] = newpath

    def removeFile(self, row):
        """Delete the file (or empty directory) referenced by `row`."""
        path, _ = row
        if path.is_dir():
            os.rmdir(path.resolve())
        else:
            os.remove(path.resolve())

    def undoMod(self, row):
        """Discard all pending edits and any pending deletion for `row`."""
        for col in self.visibleCols:
            if col._cachedValues and id(row) in col._cachedValues:
                del col._cachedValues[id(row)]

        if row in self.toBeDeleted:
            self.toBeDeleted.remove(row)

    def save(self, *rows):
        """Collect pending edits/deletions for `rows` (or all rows), confirm
        with the user, then commit them to the filesystem asynchronously."""
        changes = []
        deletes = {}
        for r in list(rows or self.rows):  # copy list because elements may be removed
            if r in self.toBeDeleted:
                deletes[id(r)] = r
            else:
                for col in self.visibleCols:
                    if self.changed(col, r):
                        changes.append((col, r))

        if not changes and not deletes:
            error('nothing to save')

        cstr = ''
        if changes:
            cstr += 'change %d attributes' % len(changes)

        if deletes:
            if cstr:
                cstr += ' and '
            cstr += 'delete %d files' % len(deletes)

        confirm('really %s? ' % cstr)

        self._commit(changes, deletes)

    @asyncthread
    def _commit(self, changes, deletes):
        """Apply queued deletions then queued column edits; each failure is
        reported via exceptionCaught without aborting the rest."""
        oldrows = self.rows
        self.rows = []
        for r in oldrows:
            try:
                if id(r) in deletes:
                    self.removeFile(r)
                else:
                    self.rows.append(r)
            except Exception as e:
                exceptionCaught(e)

        for col, row in changes:
            try:
                col.realsetter(col, row, col._cachedValues[id(row)])
                # BUGFIX: restat the row just modified ('row'), not the stale
                # 'r' left over from the deletion loop above.
                self.restat(row)
            except Exception as e:
                exceptionCaught(e)

    @asyncthread
    def reload(self):
        """Walk the source directory and rebuild rows as [Path, stat] pairs,
        skipping hidden files and hidden directories."""
        self.toBeDeleted = []
        self.rows = []
        basepath = self.source.resolve()
        for folder, subdirs, files in os.walk(basepath):
            subfolder = folder[len(basepath) + 1:]
            if subfolder.startswith('.'):
                continue
            for fn in files:
                if fn.startswith('.'):
                    continue
                p = Path(os.path.join(folder, fn))
                self.rows.append([p, p.stat()])
        self.rows.sort()

    def restat(self, row):
        """Refresh the cached stat record for `row` from the filesystem."""
        row[1] = row[0].stat()
def load_cmdline_options(self):
    """Copy parsed command-line options onto this settings object.

    Validates the socks port, the --user/--group privilege-drop pair and the
    client directory; calls sys.exit(1) on any invalid combination.
    """
    self.nodaemon = self.cmdline_options.nodaemon

    if self.cmdline_options.disable_swap:
        self.disable_swap = True

    # Translate the textual --loglevel option into a numeric level.
    log.setloglevel(verbosity_dict[self.cmdline_options.loglevel])

    self.bind_address = self.cmdline_options.ip

    self.socks_host = self.cmdline_options.socks_host

    if not self.validate_port(self.cmdline_options.socks_port):
        sys.exit(1)
    self.socks_port = self.cmdline_options.socks_port

    if platform.system() != 'Windows':
        # --user and --group must be supplied together or not at all.
        if (self.cmdline_options.user and self.cmdline_options.group is None) or \
           (self.cmdline_options.group and self.cmdline_options.user is None):
            self.print_msg("Error: missing user or group option")
            sys.exit(1)

        if self.cmdline_options.user and self.cmdline_options.group:
            # Imported lazily: grp/pwd are POSIX-only modules.
            import grp
            import pwd

            self.user = self.cmdline_options.user
            self.group = self.cmdline_options.group

            # Resolve the numeric ids used later to drop privileges.
            self.uid = pwd.getpwnam(self.cmdline_options.user).pw_uid
            self.gid = grp.getgrnam(self.cmdline_options.group).gr_gid

        # Refuse to run with root credentials.
        # NOTE(review): assumes self.uid/self.gid have non-None defaults when
        # no --user/--group was given — confirm against the class initializer.
        if self.uid == 0 or self.gid == 0:
            self.print_msg("Invalid user: cannot run as root")
            sys.exit(1)

    if self.cmdline_options.devel_mode:
        self.set_devel_mode()

    self.orm_debug = self.cmdline_options.orm_debug

    if self.cmdline_options.working_path:
        self.working_path = self.cmdline_options.working_path

    self.api_prefix = self.cmdline_options.api_prefix

    if self.cmdline_options.client_path:
        self.client_path = os.path.abspath(os.path.join(self.src_path,
                                                        self.cmdline_options.client_path))

    # Recompute all derived filesystem paths from the (possibly new) roots.
    self.eval_paths()

    if self.nodaemon:
        self.print_msg("Going in background; log available at %s" % Settings.logfile)

    # special evaluation of client directory:
    indexfile = os.path.join(self.client_path, 'index.html')
    if os.path.isfile(indexfile):
        self.print_msg("Serving the client from directory: %s" % self.client_path)
    else:
        self.print_msg("Unable to find a directory to load the client from")
        sys.exit(1)