Code Example #1
File: test_memory_cache.py  Project: 3van/sssd
def test_removed_mc(ldap_conn, sanity_rfc2307):
    """
    Regression test for ticket:
    https://fedorahosted.org/sssd/ticket/2726
    """

    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # remove cache without invalidation
    for path in os.listdir(config.MCACHE_PATH):
        os.unlink(config.MCACHE_PATH + "/" + path)

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
Code Example #2
File: au.py  Project: jsenko/repour
def gid_exists(gid):
    try:
        grp.getgrgid(gid)
    except KeyError as e:
        return False
    else:
        return True
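
For context, grp.getgrgid returns a struct_group (fields gr_name, gr_passwd, gr_gid, gr_mem) and raises KeyError for an unknown GID, which is exactly what the helper above relies on. A minimal standalone sketch; the GID 0 lookup is only illustrative:

import grp

# grp.getgrgid returns a struct_group with the group name, password
# placeholder, numeric gid and member list; unknown GIDs raise KeyError.
try:
    g = grp.getgrgid(0)  # GID 0 exists on virtually every Unix system
    print(g.gr_name, g.gr_gid, g.gr_mem)
except KeyError:
    print("no group with gid 0")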
Code Example #3
File: groups.py  Project: PeterJCLaw/srusers
    def __get_new_gidNumber( self ):
        """Finds the next available GID"""
        groups = get_conn().search_st( "ou=groups,o=sr",
                                   ldap.SCOPE_ONELEVEL,
                                   filterstr = "(objectClass=posixGroup)",
                                   attrlist = ["gidNumber"] )
        gids = []

        for gid in [int(x[1]["gidNumber"][0]) for x in groups]:
            gids.append(gid)

        gid = 2999
        while True:
            gid += 1

            if gid in gids:
                "An ldap group with that gid already exists"
                continue

            try:
                grp.getgrgid(gid)
            except KeyError:
                "The group isn't in the local stuff either"
                break

        return gid
Code Example #4
File: util.py  Project: PaulNendick/circus
    def to_gid(name):  # NOQA
        """Return a gid, given a group name

        If the group name is unknown, raises a ValueError.
        """
        try:
            name = int(name)
        except ValueError:
            pass

        if isinstance(name, int):
            try:
                grp.getgrgid(name)
                return name
            # getgrgid may raise OverflowError on Mac OS X,
            # fixed in Python 2.7.5
            # see http://bugs.python.org/issue17531
            except (KeyError, OverflowError):
                raise ValueError("No such group: %r" % name)

        from circus.py3compat import string_types  # circular import fix

        if not isinstance(name, string_types):
            raise TypeError(name)

        try:
            return grp.getgrnam(name).gr_gid
        except KeyError:
            raise ValueError("No such group: %r" % name)
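
A hedged usage sketch of the to_gid helper above. It assumes to_gid is importable in the current scope, and "wheel" is only an illustrative group name; substitute one that exists locally:

# Illustrative calls against the helper defined above.
print(to_gid(0))        # existing numeric gid -> returned unchanged
print(to_gid("0"))      # numeric string -> converted to int, then verified
print(to_gid("wheel"))  # group name -> resolved via grp.getgrnam (assumed to exist)

try:
    to_gid("no_such_group_hopefully")
except ValueError as exc:
    print(exc)          # "No such group: ..."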
Code Example #5
File: mounts.py  Project: Partypapa/mineos
    def dashboard(self):
        from procfs_reader import entries, proc_uptime, disk_free, git_hash
        from grp import getgrall, getgrgid
        from pwd import getpwnam
        from stock_profiles import STOCK_PROFILES
        
        kb_free = dict(entries('', 'meminfo'))['MemFree']
        mb_free = str(round(float(kb_free.split()[0])/1000, 2))

        try:
            pc_path = os.path.join(self.base_directory, mc.DEFAULT_PATHS['profiles'], 'profile.config')
            mc.has_ownership(self.login, pc_path)
        except (OSError, KeyError):
            profile_editable = False
        else:
            profile_editable = True
        finally:
            st = os.stat(pc_path)
            pc_group = getgrgid(st.st_gid).gr_name

        primary_group = getgrgid(getpwnam(self.login).pw_gid).gr_name
    
        return {
            'uptime': int(proc_uptime()[0]),
            'memfree': mb_free,
            'whoami': self.login,
            'group': primary_group,
            'df': dict(disk_free('/')._asdict()),
            'groups': [i.gr_name for i in getgrall() if self.login in i.gr_mem or self.login == 'root'] + [primary_group],
            'pc_permissions': profile_editable,
            'pc_group': pc_group,
            'git_hash': git_hash(os.path.dirname(os.path.abspath(__file__))),
            'stock_profiles': [i['name'] for i in STOCK_PROFILES],
            'base_directory': self.base_directory,
            }
Code Example #6
File: prep.py  Project: goura/karesansui
def have_privilege(msg=True):
    """<comment-ja>
    実行可能ユーザーかどうかの判定
    </comment-ja>
    <comment-en>
    Return True if the current process should be able to run karesansui.
    </comment-en>
    """
    import os
    import pwd, grp
    from lib.const import KARESANSUI_USER, KARESANSUI_GROUP

    try:
        ok_gr = grp.getgrnam(KARESANSUI_GROUP)[3]
        ok_gr.append(grp.getgrgid(pwd.getpwnam(KARESANSUI_USER)[3])[0])
    except:
        ok_gr = []

    ret = (grp.getgrgid(os.getgid())[0] in ok_gr)
    if ret is False and msg is True:
        print >>sys.stderr, """
# chgrp -R %s %s
# chmod -R g+w %s
# chgrp -R %s %s
# chmod -R g+w %s
Or check permission of the following directories.
* log file directory
* configuration file directory
""" % (KARESANSUI_GROUP,karesansui.config['application.bin.dir'],karesansui.config['application.bin.dir'], KARESANSUI_GROUP,os.path.dirname(__file__),os.path.dirname(__file__),
)
    return ret
Code Example #7
File: test_memory_cache.py  Project: SSSD/sssd
def test_mc_zero_timeout(ldap_conn, zero_timeout_rfc2307):
    """
    Test that the memory cache is not created at all with memcache_timeout=0
    """
    # No memory cache files must be created
    assert len(os.listdir(config.MCACHE_PATH)) == 0

    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
Code Example #8
File: roundup_server.py  Project: AnishShah/roundup
def setgid(group):
    if group is None:
        return
    if not hasattr(os, 'setgid'):
        return

    # if root, setgid to the running user
    if os.getuid():
        print _('WARNING: ignoring "-g" argument, not root')
        return

    try:
        import grp
    except ImportError:
        raise ValueError, _("Can't change groups - no grp module")
    try:
        try:
            gid = int(group)
        except ValueError:
            gid = grp.getgrnam(group)[2]
        else:
            grp.getgrgid(gid)
    except KeyError:
        raise ValueError,_("Group %(group)s doesn't exist")%locals()
    os.setgid(gid)
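
The roundup example above uses Python 2 raise/print syntax. Purely for comparison, a sketch of the same gid-or-name resolution written for Python 3 (not part of the roundup source):

import grp
import os

def setgid_py3(group):
    """Sketch: resolve a group name or numeric gid string, then drop to it."""
    if group is None or not hasattr(os, 'setgid'):
        return
    if os.getuid():
        print('WARNING: ignoring "-g" argument, not root')
        return
    try:
        try:
            gid = int(group)
        except ValueError:
            gid = grp.getgrnam(group).gr_gid   # name -> gid
        else:
            grp.getgrgid(gid)                  # verify the numeric gid exists
    except KeyError:
        raise ValueError("Group %s doesn't exist" % group)
    os.setgid(gid)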
Code Example #9
File: func.py  Project: ztq35795/learngit
    def __init__(self,filename):
        self.Maxsizelen=len("%s"%os.stat(filename[0]).st_size)
        self.Maxlinklen=len("%s"%os.stat(filename[0]).st_nlink)
        self.Maxuserlen=len(pwd.getpwuid(os.stat(filename[0]).st_uid).pw_name)
        self.Maxgrouplen=len(grp.getgrgid(os.stat(filename[0]).st_gid).gr_name)

        # get the max length of each output field
        for name in filename:

            filelinkl=len("%s"%os.stat(name).st_nlink)
            filesizel=len("%s"%os.stat(name).st_size)
            userl=len(pwd.getpwuid(os.stat(name).st_uid).pw_name)
            groupl=len(grp.getgrgid(os.stat(name).st_gid).gr_name)

            if(self.Maxlinklen<=int(filelinkl)):
                self.Maxlinklen=int(filelinkl)

            if(self.Maxsizelen<=int(filesizel)):
                self.Maxsizelen=int(filesizel)

            if(self.Maxuserlen<=int(userl)):
                self.Maxuserlen=int(userl)

            if(self.Maxgrouplen<=int(groupl)):
                self.Maxgrouplen=int(groupl)
Code Example #10
File: configread.py  Project: JumpeiArashi/blackbird
def is_group(value):
    """
    Check whether the group name or gid given as the argument exists.
    If a group name is received, it is converted to a gid before validation.
    """

    if type(value) == str:
        try:
            entry = grp.getgrnam(value)
            value = entry.gr_gid
        except KeyError:
            err_message = ('{0}: No such group.'.format(value))
            raise validate.VdtValueError(err_message)

        return value

    elif type(value) == int:
        try:
            grp.getgrgid(value)
        except KeyError:
            err_message = ('{0}: No such group.'.format(value))
            raise validate.VdtValueError(err_message)

        return value

    else:
        err_message = ('Please use str or int for the "group" parameter.')
        raise validate.VdtTypeError(err_message)
Code Example #11
File: ldap_test.py  Project: lejonet/sssd
def test_sanity_rfc2307(ldap_conn, sanity_rfc2307):
    passwd_pattern = ent.contains_only(
        dict(name='user1', passwd='*', uid=1001, gid=2001, gecos='1001',
             dir='/home/user1', shell='/bin/bash'),
        dict(name='user2', passwd='*', uid=1002, gid=2002, gecos='1002',
             dir='/home/user2', shell='/bin/bash'),
        dict(name='user3', passwd='*', uid=1003, gid=2003, gecos='1003',
             dir='/home/user3', shell='/bin/bash')
    )
    ent.assert_passwd(passwd_pattern)

    group_pattern = ent.contains_only(
        dict(name='group1', passwd='*', gid=2001, mem=ent.contains_only()),
        dict(name='group2', passwd='*', gid=2002, mem=ent.contains_only()),
        dict(name='group3', passwd='*', gid=2003, mem=ent.contains_only()),
        dict(name='empty_group', passwd='*', gid=2010,
             mem=ent.contains_only()),
        dict(name='two_user_group', passwd='*', gid=2012,
             mem=ent.contains_only("user1", "user2"))
    )
    ent.assert_group(group_pattern)

    with pytest.raises(KeyError):
        pwd.getpwnam("non_existent_user")
    with pytest.raises(KeyError):
        pwd.getpwuid(1)
    with pytest.raises(KeyError):
        grp.getgrnam("non_existent_group")
    with pytest.raises(KeyError):
        grp.getgrgid(1)
Code Example #12
File: core.py  Project: Epictetus/djangy
def _get_fresh_gid():
    for gid in range(100, 1000):
        try:
            grp.getgrgid(gid)
        except KeyError:
            return gid
    raise GidAllocationException()
Code Example #13
File: logger.py  Project: afeset/miner2-tools
    def logEnv(self, header=None):
        """ Collecte and log various enviroment info such as hostname, argumnets """
    
        if header:
            self.debug("Logger-env: prog=%s header='%s'" % (path.progName(), header)  )
             
        self.debug("Logger-env: progPath=%s"     % path.progPath()  )
        self.debug("Logger-env: progAbsPath=%s"  % path.progAbsPath()  )
        self.debug("Logger-env: date=%s"         % time.asctime()    )
        self.debug("Logger-env: argv=%s"         % str(sys.argv)     )
        self.debug("Logger-env: cwd=%s"          % str(os.getcwd())  )
      
        uid = os.getuid()    ;  euid = os.geteuid()
        gid = os.getgid()    ;  egid = os.getegid() 
        self.debug("Logger-env: uid=%s(%d) euid=%s(%d) guid=%s(%d) egid=%s(%d)" % 
                    ( pwd.getpwuid(uid)[0], uid, pwd.getpwuid(euid)[0], euid,
                      grp.getgrgid(gid)[0], gid, grp.getgrgid(egid)[0], egid ) )
                        
        self.debug("Logger-env: pid=%d  ppid=%d  pgpid=%d"  % ( os.getpid(), os.getppid(), os.getpgrp() ) )
        self.debug("Logger-env: hostname=%s  uname=%s"      % ( socket.gethostname(), str(os.uname())   ) )
      
        self.debug("Logger-env: pythonVersion=%s" % sys.version.replace("\n","")  )
        self.debug("Logger-env: pythonPath=%s"    % str(sys.path)  )

        # Additional info at lower level
        for v in sorted(os.environ.keys()):
            self.debug("Logger-env:     os.environ[%s]=%s"  % (v, os.environ[v]) )
Code Example #14
    def test_rsync_set_group(self):
        """Test setting the group membership on rsync'd files
        """

        root = tempfile.mkdtemp(prefix="rsync_test_set_group_")
        avail_groups = os.getgroups()
        exp_group = grp.getgrgid(avail_groups[random.randint(1, len(avail_groups)) - 1])[0]

        # Create some files to move
        to_copy = self._create_test_files(root)

        # Run rsync
        with open(os.devnull, "w") as f:
            old_stdout = sys.stdout
            sys.stdout = f
            rsync_files(to_copy, sys.stdout, exp_group, False)
            sys.stdout = old_stdout

        # Verify the copy process set the correct group on created directories
        for ddir in set([d[1] for d in to_copy]):
            gid = os.stat(ddir).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(
                obs_group, exp_group, "Failed to set group '{}' on directory. Group is {}".format(exp_group, obs_group)
            )

        # Verify the copy process set the correct group
        for src, ddir, dname in to_copy:
            dfile = os.path.join(ddir, dname)
            gid = os.stat(dfile).st_gid
            obs_group = grp.getgrgid(gid)[0]
            self.assertEqual(
                obs_group, exp_group, "Failed to set group '{}' on file. Group is {}".format(exp_group, obs_group)
            )
Code Example #15
File: pymaidentity.py  Project: pudquick/pymaIdentity
 def __init__(self, name_or_gid = None):
     # If passed a string, assume group name
     # If passed a number, assume gid
     # If None, leave everything with a value of None
     
     # Initialize everything to None
     for i in self._fields:
         setattr(self, i, None)
     
     # Determine whether we were passed a name or a gid or a Group
     if isinstance(name_or_gid, Group):
         # Guessing it's a Group object - clone the settings
         # Clone if user name or gid present, otherwise None
         if name_or_gid != None:
             if name_or_gid.name is not None:
                 gr_info = grp.getgrnam(name_or_gid.name)
             else:
                 gr_info = grp.getgrgid(name_or_gid.gid)
             self._init_with_grp(gr_info)
     elif isinstance(name_or_gid, (int,long)):
         # Guessing it's a gid
         try:
             gr_info = grp.getgrgid(name_or_gid)
             self._init_with_grp(gr_info)
         except KeyError:
             self.gid = None
     elif isinstance(name_or_gid, basestring):
         # Guessing it's a group name
         try:
             gr_info = grp.getgrnam(name_or_gid)
             self._init_with_grp(gr_info)
         except KeyError:
             self.name = None
Code Example #16
File: test_env.py  Project: wyatt88/pcs
    def test_set_desired_file_access(self):
        #setup
        file_path = rc("temp-keyfile")
        if os.path.exists(file_path):
            os.remove(file_path)
        with open(file_path, "w") as file:
            file.write("content")

        #check assumptions
        stat = os.stat(file_path)
        self.assertNotEqual('600', oct(stat.st_mode)[-3:])
        current_user = pwd.getpwuid(os.getuid())[0]
        if current_user != settings.pacemaker_uname:
            file_user = pwd.getpwuid(stat.st_uid)[0]
            self.assertNotEqual(file_user, settings.pacemaker_uname)
        current_group = grp.getgrgid(os.getgid())[0]
        if current_group != settings.pacemaker_gname:
            file_group = grp.getgrgid(stat.st_gid)[0]
            self.assertNotEqual(file_group, settings.pacemaker_gname)

        #run tested method
        env.set_keyfile_access(file_path)

        #check
        stat = os.stat(file_path)
        self.assertEqual('600', oct(stat.st_mode)[-3:])

        file_user = pwd.getpwuid(stat.st_uid)[0]
        self.assertEqual(file_user, settings.pacemaker_uname)

        file_group = grp.getgrgid(stat.st_gid)[0]
        self.assertEqual(file_group, settings.pacemaker_gname)
Code Example #17
File: setup.py  Project: d3zd3z/davidb-docker
    def make_user(self):
        if self.uid == 0:
            return

        # If the user has already been created, don't do it again.
        ufile = "/.setup-{}".format(self.uid)
        if os.path.isfile(ufile):
            return

        os.makedirs(self.home, exist_ok=True)
        os.chown(self.home, self.uid, self.gid)
        try:
            grp.getgrgid(self.gid)
        except KeyError:
            check_call(["addgroup", "-g", str(self.gid), self.user])
        check_call(["adduser", "-u", str(self.uid),
            "-g", str(self.gid),
            "-G", "wheel",
            "-D",
            self.user])

        # Ensure that this user can sudo without a password.
        # Unfortunately, sudo -v still wants a password.
        with open("/etc/sudoers", "a") as fd:
            print("{} ALL = NOPASSWD: ALL".format(self.user), file=fd)
            print("%sudo ALL=(ALL) NOPASSWD: ALL", file=fd)

        # Mark that the setup has run, so it doesn't get rerun in the
        # same image.
        with open(ufile, "w"):
            pass
Code Example #18
File: test_enumeration.py  Project: jhrozek/sssd
def test_sanity_rfc2307_bis(ldap_conn, sanity_rfc2307_bis):
    passwd_pattern = ent.contains_only(
        dict(name="user1", passwd="*", uid=1001, gid=2001, gecos="1001", dir="/home/user1", shell="/bin/bash"),
        dict(name="user2", passwd="*", uid=1002, gid=2002, gecos="1002", dir="/home/user2", shell="/bin/bash"),
        dict(name="user3", passwd="*", uid=1003, gid=2003, gecos="1003", dir="/home/user3", shell="/bin/bash"),
    )
    ent.assert_passwd(passwd_pattern)

    group_pattern = ent.contains_only(
        dict(name="group1", passwd="*", gid=2001, mem=ent.contains_only()),
        dict(name="group2", passwd="*", gid=2002, mem=ent.contains_only()),
        dict(name="group3", passwd="*", gid=2003, mem=ent.contains_only()),
        dict(name="empty_group1", passwd="*", gid=2010, mem=ent.contains_only()),
        dict(name="empty_group2", passwd="*", gid=2011, mem=ent.contains_only()),
        dict(name="two_user_group", passwd="*", gid=2012, mem=ent.contains_only("user1", "user2")),
        dict(name="group_empty_group", passwd="*", gid=2013, mem=ent.contains_only()),
        dict(name="group_two_empty_groups", passwd="*", gid=2014, mem=ent.contains_only()),
        dict(name="one_user_group1", passwd="*", gid=2015, mem=ent.contains_only("user1")),
        dict(name="one_user_group2", passwd="*", gid=2016, mem=ent.contains_only("user2")),
        dict(name="group_one_user_group", passwd="*", gid=2017, mem=ent.contains_only("user1")),
        dict(name="group_two_user_group", passwd="*", gid=2018, mem=ent.contains_only("user1", "user2")),
        dict(name="group_two_one_user_groups", passwd="*", gid=2019, mem=ent.contains_only("user1", "user2")),
    )
    ent.assert_group(group_pattern)

    with pytest.raises(KeyError):
        pwd.getpwnam("non_existent_user")
    with pytest.raises(KeyError):
        pwd.getpwuid(1)
    with pytest.raises(KeyError):
        grp.getgrnam("non_existent_group")
    with pytest.raises(KeyError):
        grp.getgrgid(1)
Code Example #19
def is_DirectoryWriteable(dirname, use_username):
#   code snippet from stackoverflow
#   get all group ids which can be used by use_username
  gids = [g.gr_gid for g in getgrall() if use_username in g.gr_mem]
#   get groupid of the use_username
  gid = getpwnam(use_username).pw_gid
  gids.append(getgrgid(gid).gr_gid)
#   get the group names
  usergroups = [getgrgid(gid).gr_name for gid in gids]

  dirstat = osstat(dirname)
  dirusername = getpwuid(dirstat.st_uid)[0]
  dirusergroup = getgrgid(dirstat.st_gid)[0]
#   check if directory belongs to user and writeable
  if dirusername == use_username and bool(dirstat.st_mode & stat.S_IWUSR):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
#   check if group of the directory in usergroup and writeable
  elif dirusergroup in usergroups and bool(dirstat.st_mode & stat.S_IWGRP):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
#   check if writeable for anyone
  elif bool(dirstat.st_mode & stat.S_IWOTH):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
  else:
#     module_logger.error('{0}: "{1}" not writeable for "{2}"'.format(getframe().f_code.co_name, dirname, use_username))
    return False
Code Example #20
File: start.py  Project: durandj/mymcadmin
    def _convert_group(group):
        if isinstance(group, int):
            grp.getgrgid(group)

            return group
        else:
            return grp.getgrnam(group).gr_gid
Code Example #21
File: utils.py  Project: ryankaiser/python_agcs
 def __init__(self, gid=None, name=None):
     if gid:
         self._grp = grp.getgrgid(gid)
     elif name:
         self._grp = grp.getgrnam(name)
     else:
         self._grp = grp.getgrgid(os.getgid())
Code Example #22
File: nxFile.py  Project: 40a/WPSDSCLinux
def TestOwnerGroupMode(DestinationPath, SourcePath, fc):
    stat_info = os.lstat(DestinationPath)

    if SourcePath:
        stat_info_src = os.lstat(SourcePath)

    if fc.Owner:
        Specified_Owner_ID = pwd.getpwnam(fc.Owner)[2]
        if Specified_Owner_ID != pwd.getpwuid(stat_info.st_uid)[2]:
            return False
    elif SourcePath:
        # Owner wasn't specified, if SourcePath is specified then check that the Owners match
        if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]:
            return False

    if fc.Group:
        Specified_Group_ID = grp.getgrnam(fc.Group)[2]
        if Specified_Group_ID != grp.getgrgid(stat_info.st_gid)[2]:
            return False
    elif SourcePath:
        # Group wasn't specified, if SourcePath is specified then check that the Groups match
        if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]:
            return False
    
    # Mode is irrelevant to symlinks
    if not os.path.islink(DestinationPath):
        if fc.Mode:
            if str(oct(stat_info.st_mode))[-3:] != fc.Mode:
                return False
        elif SourcePath:
            # Mode wasn't specified, if SourcePath is specified then check that the Modes match
            if str(oct(stat_info.st_mode))[-3:] != str(oct(stat_info_src.st_mode))[-3:]:
                return False

    return True
Code Example #23
File: __init__.py  Project: AbhishekKumarSingh/galaxy
def umask_fix_perms( path, umask, unmasked_perms, gid=None ):
    """
    umask-friendly permissions fixing
    """
    perms = unmasked_perms & ~umask
    try:
        st = os.stat( path )
    except OSError as e:
        log.exception( 'Unable to set permissions or group on %s' % path )
        return
    # fix modes
    if stat.S_IMODE( st.st_mode ) != perms:
        try:
            os.chmod( path, perms )
        except Exception as e:
            log.warning( 'Unable to honor umask (%s) for %s, tried to set: %s but mode remains %s, error was: %s' % ( oct( umask ),
                                                                                                                      path,
                                                                                                                      oct( perms ),
                                                                                                                      oct( stat.S_IMODE( st.st_mode ) ),
                                                                                                                      e ) )
    # fix group
    if gid is not None and st.st_gid != gid:
        try:
            os.chown( path, -1, gid )
        except Exception as e:
            try:
                desired_group = grp.getgrgid( gid )
                current_group = grp.getgrgid( st.st_gid )
            except:
                desired_group = gid
                current_group = st.st_gid
            log.warning( 'Unable to honor primary group (%s) for %s, group remains %s, error was: %s' % ( desired_group,
                                                                                                          path,
                                                                                                          current_group,
                                                                                                          e ) )
Code Example #24
File: ZApostd.py  Project: pombreda/zoo-animals
def setupLogs(options):
    logger = logging.getLogger()

    if options.trace or options.logfile:
        loglevel = getattr(logging, options.loglevel.upper())

        f = logging.Formatter('%(asctime)s %(filename)s %(levelname)s %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')

        logger.setLevel(loglevel)

        if options.trace:
            s = logging.StreamHandler()
            s.setLevel(loglevel)
            s.setFormatter(f)

            logging.getLogger('').addHandler(s)

        if options.logfile:
            fh = logging.FileHandler(options.logfile)
            fh.setLevel(loglevel)
            fh.setFormatter(f)

            logging.getLogger('').addHandler(fh)

    logger.debug('workdir = {}'.format(options.workdir))
    logger.debug('oedir = {}'.format(options.oedir))
    logger.debug('svnloc = {}'.format(options.svnloc))
    logger.debug('attemptsdir = {}'.format(options.attemptsdir))
    logger.debug('uid = {} = {}'.format(os.getuid(), pwd.getpwuid(os.getuid()).pw_name))
    logger.debug('euid = {} = {}'.format(os.geteuid(), pwd.getpwuid(os.geteuid()).pw_name))
    logger.debug('gid = {} = {}'.format(os.getgid(), grp.getgrgid(os.getgid()).gr_name))
    logger.debug('egid = {} = {}'.format(os.getegid(), grp.getgrgid(os.getegid()).gr_name))

    return logger
Code Example #25
File: root_fork.py  Project: 4sp1r3/oschameleon
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
    print("Init: Running as {0}/{1}.".format(pwd.getpwuid(os.getuid())[0], grp.getgrgid(os.getgid())[0]))
    wanted_uid = pwd.getpwnam(uid_name)[2]
    wanted_gid = grp.getgrnam(gid_name)[2]

    pid = gevent.fork()
    if pid == 0:
        # child
        print 'starting child process'
        child_process = gevent.spawn(root_process)
        child_process.join()
        print 'Child done:', child_process.successful()
        oschameleon.osfuscation.flush_tables()
        print 'Child exit'
    else:
        # parent
        os.setgid(wanted_gid)
        os.setuid(wanted_uid)
        new_uid_name = pwd.getpwuid(os.getuid())[0]
        new_gid_name = grp.getgrgid(os.getgid())[0]
        print("Parent: Privileges dropped, running as {0}/{1}.".format(new_uid_name, new_gid_name))
        while True:
            try:
                gevent.sleep(1)
                print 'Parent: ping'
            except KeyboardInterrupt:
                break
Code Example #26
def check_ownerandgroup(parser, section, oorg, debug):
    if oorg == 'owner':
        switch = "User"
    else:
        switch = "Group"

    if not parser.has_option(section, oorg):
        if debug:
            print >> sys.stderr, ("[debug] '%s' in section '%s' is not set"
                                  % (oorg, section))
        return None

    tempowner = parser.get(section, oorg)
    if tempowner in ['', 'None', 'none']:
        if debug:
            print >> sys.stderr, ("[debug] '%s' in section '%s' is not "
                                  "set" % (oorg, section))
        return None

    try:
        tempowner = int(tempowner)
    except ValueError:
        try:
            if oorg == 'owner':
                pwd.getpwnam(tempowner)
            else:
                grp.getgrnam(tempowner)
        except KeyError:
            owner = False
            print >> sys.stderr, ("Error in section '%s': %s '%s' "
                                  "doesn't exist" % (section, switch,
                                  tempowner))
        else:
            # save the user/group as uid
            if oorg == 'owner':
                owner = pwd.getpwnam(tempowner).pw_uid
            else:
                owner = grp.getgrnam(tempowner).gr_gid
            if debug:
                print >> sys.stderr, ("[debug] '%s' in section '%s' "
                                      "is valid" % (oorg, section))
    else:
        try:
            if oorg == 'owner':
                pwd.getpwuid(tempowner)
            else:
                grp.getgrgid(tempowner)
        except KeyError:
            print >> sys.stderr, ("Error in section '%s': %s '%s' "
                                  "doesn't exist" % (section, switch,
                                  tempowner))
            owner = False
        else:
            owner = tempowner
            if debug:
                print >> sys.stderr, ("[debug] '%s' in section '%s' "
                                      "is valid" % (oorg, section))

    return owner
Code Example #27
File: utils.py  Project: Kacn/GateOne
def drop_privileges(uid='nobody', gid='nogroup', supl_groups=None):
    """
    Drop privileges by changing the current process owner/group to
    *uid*/*gid* (both may be an integer or a string).  If *supl_groups* (list)
    is given the process will be assigned those values as its effective
    supplemental groups.  If *supl_groups* is None it will default to using
    'tty' as the only supplemental group.  Example::

        drop_privileges('gateone', 'gateone', ['tty'])

    This would change the current process owner to gateone/gateone with 'tty' as
    its only supplemental group.

    .. note:: On most Unix systems users must belong to the 'tty' group to create new controlling TTYs which is necessary for 'pty.fork()' to work.

    .. tip:: If you get errors like, "OSError: out of pty devices" it likely means that your OS uses something other than 'tty' as the group owner of the devpts filesystem.  'mount | grep pts' will tell you the owner.
    """
    import pwd, grp
    running_uid = uid
    running_gid = gid
    if not isinstance(uid, int):
        # Get the uid from the name
        running_uid = pwd.getpwnam(uid).pw_uid
    if not isinstance(gid, int):
        # Get the gid from the name
        running_gid = grp.getgrnam(gid).gr_gid
    if supl_groups is None:
        # Default documented above: 'tty' as the only supplemental group
        supl_groups = ['tty']
    if supl_groups:
        for i, group in enumerate(supl_groups):
            # Just update in-place
            if not isinstance(group, int):
                supl_groups[i] = grp.getgrnam(group).gr_gid
        try:
            os.setgroups(supl_groups)
        except OSError as e:
            logging.error(_('Could not set supplemental groups: %s' % e))
            exit()
    # Try setting the new uid/gid
    try:
        os.setgid(running_gid)
    except OSError as e:
        logging.error(_('Could not set effective group id: %s' % e))
        exit()
    try:
        os.setuid(running_uid)
    except OSError as e:
        logging.error(_('Could not set effective user id: %s' % e))
        exit()
    # Ensure a very conservative umask
    new_umask = 0o77
    old_umask = os.umask(new_umask)
    final_uid = os.getuid()
    final_gid = os.getgid()
    human_supl_groups = []
    for group in supl_groups:
        human_supl_groups.append(grp.getgrgid(group).gr_name)
    logging.info(_(
        'Running as user/group, "%s/%s" with the following supplemental groups:'
        ' %s' % (pwd.getpwuid(final_uid)[0], grp.getgrgid(final_gid)[0],
                 ",".join(human_supl_groups))
    ))
Code Example #28
File: host.py  Project: gnuoy/charm-hacluster
def gid_exists(gid):
    """Check if a gid exists"""
    try:
        grp.getgrgid(gid)
        gid_exists = True
    except KeyError:
        gid_exists = False
    return gid_exists
Code Example #29
File: users.py  Project: caglar10ur/func
 def gid_info(self,gid):
     """Returns group info or false for a specified group (by GID) on the target system(s)."""
     try:
         if grp.getgrgid(int(gid)):
             info = grp.getgrgid(int(gid))
             return list(info)
     except KeyError:
         return False
Code Example #30
File: utils.py  Project: sebrandon1/keystone
def get_unix_group(group=None):
    """Get the gid and group name.

    This is a convenience utility which accepts a variety of input
    which might represent a unix group. If successful it returns the gid
    and name. Valid input is:

    string
        A string is first considered to be a group name and a lookup is
        attempted under that name. If no name is found then an attempt
        is made to convert the string to an integer and perform a
        lookup as a gid.

    int
        An integer is interpreted as a gid.

    None
        None is interpreted to mean use the current process's
        effective group.

    If the input is a valid type but no group is found a KeyError is
    raised. If the input is not a valid type a TypeError is raised.


    :param object group: string, int or None specifying the group to
                         lookup.

    :returns: tuple of (gid, name)

    """
    if isinstance(group, six.string_types):
        try:
            group_info = grp.getgrnam(group)
        except KeyError:
            # Was an int passed as a string?
            # Try converting to int and lookup by id instead.
            try:
                i = int(group)
            except ValueError:
                raise KeyError("group name '%s' not found" % group)
            try:
                group_info = grp.getgrgid(i)
            except KeyError:
                raise KeyError("group id %d not found" % i)
    elif isinstance(group, int):
        try:
            group_info = grp.getgrgid(group)
        except KeyError:
            raise KeyError("group id %d not found" % group)
    elif group is None:
        group_info = grp.getgrgid(os.getegid())
    else:
        group_cls_name = reflection.get_class_name(group,
                                                   fully_qualified=False)
        raise TypeError('group must be string, int or None; not %s (%r)' %
                        (group_cls_name, group))

    return group_info.gr_gid, group_info.gr_name
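
A hedged usage sketch following the docstring of get_unix_group above. It assumes the function is importable from keystone's utils module and that the named group exists locally; each call returns a (gid, name) tuple:

# Illustrative calls only; "wheel" is an assumed group name.
print(get_unix_group())          # None -> current process's effective group
print(get_unix_group(0))         # int -> looked up as a gid
print(get_unix_group("0"))       # numeric string -> retried as gid 0
print(get_unix_group("wheel"))   # name lookup; KeyError if it doesn't exist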
Code Example #31
def ls(path, options=''):
    file_list = []
    files = os.listdir(path)

    # Handle the -a option (include dotfiles)
    if 'a' in options:
        file_list = files
    else:
        for file in files:
            if file[0] == '.':
                pass
            else:
                file_list.append(file)

    for target in file_list:
        stat_status = os.lstat(path + "/" + target)
        result = []
        result.append(target)
        file_info_list = []
        stat_mode = stat_status[stat.ST_MODE]

        # Handle the -l option (long listing)
        if 'l' in options:
            # Format the last-modified timestamp
            last_modified = stat_status.st_mtime
            dt = datetime.datetime.fromtimestamp(last_modified)
            tmp_time = dt.strftime("%H:%M")
            tmp_month = dt.strftime("%b")
            tmp_date = dt.strftime("%d")
            result.insert(0, tmp_time)
            result.insert(0, tmp_date)
            result.insert(0, tmp_month)

            # Show the size in bytes
            result.insert(0, os.path.getsize(path + "/" + target))

            # Show the owner and group of the entry
            uid = pwd.getpwuid(stat_status[4]).pw_name
            gid = grp.getgrgid(stat_status[5]).gr_name
            result.insert(0, gid)
            result.insert(0, uid)

            # Show the link count
            link_cnt = stat_status[3]
            result.insert(0, link_cnt)

            # Identify the entry type (file/directory/symlink)
            tmp_data = ''
            if stat.S_ISLNK(stat_mode):
                tmp_data = 'l'
            elif stat.S_ISDIR(stat_mode):
                tmp_data = 'd'
            elif stat.S_ISREG(stat_mode):
                tmp_data = '-'

            # Build the permission string in user, group, other order (r/w/x)
            for level in "USR", "GRP", "OTH":
                for param in "R", "W", "X":
                    if stat_mode & getattr(stat, "S_I" + param + level):
                        tmp_data = tmp_data + param.lower()
                    else:
                        tmp_data = tmp_data + '-'

            result.insert(0, tmp_data)


        # Show the inode number when -i is given
        if 'i' in options:
            inode = stat_status[1]
            result.insert(0, inode)

        str_result = map(str, result)
        print(" ".join(str_result))
Code Example #32
def root_process():
    print("Child running as {0}/{1}.".format(
        pwd.getpwuid(os.getuid())[0],
        grp.getgrgid(os.getgid())[0]))
    oschameleon.osfuscation.OSFuscation.run()
Code Example #33
    def __init__(
        self,
        tag='tmp-bioconda-builder',
        container_recipe='/opt/recipe',
        container_staging="/opt/host-conda-bld",
        requirements=None,
        build_script_template=BUILD_SCRIPT_TEMPLATE,
        dockerfile_template=DOCKERFILE_TEMPLATE,
        use_host_conda_bld=False,
        pkg_dir=None,
        keep_image=False,
        image_build_dir=None,
        docker_base_image=None,
    ):
        """
        Class to handle building a custom docker container that can be used for
        building conda recipes.

        Parameters
        ----------
        tag : str
            Tag to be used for the custom-built docker container. Mostly for
            debugging purposes when you need to inspect the container.

        container_recipe : str
            Directory to which the host's recipe will be exported. Will be
            read-only.

        container_staging : str
            Directory to which the host's conda-bld dir will be mounted so that
            the container can use previously-built packages as dependencies.
            Upon successful building, container-built packages will be copied
            over. Mounted as read-write.

        requirements : None or str
            Path to a "requirements.txt" file which will be installed with
            conda in a newly-created container. If None, then use the default
            installed with bioconda_utils.

        build_script_template : str
            Template that will be filled in with .format(self=self) and that
            will be run in the container each time build_recipe() is called. If
            not specified, uses docker_utils.BUILD_SCRIPT_TEMPLATE.

        dockerfile_template : str
            Template that will be filled in with .format(self=self) and that
            will be used to build a custom image. Uses
            docker_utils.DOCKERFILE_TEMPLATE by default.

        use_host_conda_bld : bool
            If True, then use the host's conda-bld directory. This will export
            the host's existing conda-bld directory to the docker container,
            and any recipes successfully built by the container will be added
            here.

            Otherwise, use `pkg_dir` as a common host directory used across
            multiple runs of this RecipeBuilder object.

        pkg_dir : str or None
            Specify where packages should appear on the host.

            If `pkg_dir` is None, then a temporary directory will be
            created once for each RecipeBuilder instance and that directory
            will be used for each call to `RecipeBuilder.build()`. This allows
            subsequent recipes built by the container to see previous built
            recipes without polluting the host's conda-bld directory.

            If `pkg_dir` is a string, then it will be created if needed and
            this directory will be used store all built packages on the host
            instead of the temp dir.

            If the above argument `use_host_conda_bld` is True, then the value
            of `pkg_dir` will be ignored and the host's conda-bld directory
            will be used.

            In all cases, `pkg_dir` will be mounted to `container_staging` in
            the container.

        keep_image : bool
            By default, the built docker image will be removed when done,
            freeing up storage space.  Set keep_image=True to disable this
            behavior.

        image_build_dir : str or None
            If not None, use an existing directory as a docker image context
            instead of a temporary one. For testing purposes only.

        docker_base_image : str or None
            Name of base image that can be used in `dockerfile_template`.
            Defaults to 'bioconda/bioconda-utils-build-env:2019-02-01'
        """
        self.tag = tag
        self.requirements = requirements
        self.conda_build_args = ""
        self.build_script_template = build_script_template
        self.dockerfile_template = dockerfile_template
        self.keep_image = keep_image
        if docker_base_image is None:
            docker_base_image = 'bioconda/bioconda-utils-build-env:2019-02-01'
        self.docker_base_image = docker_base_image

        # To address issue #5027:
        #
        # https_proxy is the standard name, but conda looks for HTTPS_PROXY in all
        # caps. So we look for both in the current environment. If both exist
        # then ensure they have the same value; otherwise use whichever exists.
        #
        # Note that the proxy needs to be in the image when building it, and
        # that the proxies need to be set before the conda install command. The
        # position of `{self.proxies}` in `dockerfile_template` should reflect
        # this.
        _proxies = []
        http_proxy = set([
            os.environ.get('http_proxy', None),
            os.environ.get('HTTP_PROXY', None)
        ]).difference([None])

        https_proxy = set([
            os.environ.get('https_proxy', None),
            os.environ.get('HTTPS_PROXY', None)
        ]).difference([None])

        if len(http_proxy) == 1:
            proxy = list(http_proxy)[0]
            _proxies.append('ENV http_proxy {0}'.format(proxy))
            _proxies.append('ENV HTTP_PROXY {0}'.format(proxy))
        elif len(http_proxy) > 1:
            raise ValueError("http_proxy and HTTP_PROXY have different values")

        if len(https_proxy) == 1:
            proxy = list(https_proxy)[0]
            _proxies.append('ENV https_proxy {0}'.format(proxy))
            _proxies.append('ENV HTTPS_PROXY {0}'.format(proxy))
        elif len(https_proxy) > 1:
            raise ValueError(
                "https_proxy and HTTPS_PROXY have different values")
        self.proxies = '\n'.join(_proxies)

        # find and store user info
        uid = os.getuid()
        usr = pwd.getpwuid(uid)
        self.user_info = dict(uid=uid,
                              gid=usr.pw_gid,
                              groupname=grp.getgrgid(usr.pw_gid).gr_name,
                              username=usr.pw_name)

        self.container_recipe = container_recipe
        self.container_staging = container_staging

        self.host_conda_bld = get_host_conda_bld()

        if use_host_conda_bld:
            self.pkg_dir = self.host_conda_bld
        else:
            if pkg_dir is None:
                self.pkg_dir = tempfile.mkdtemp()
            else:
                if not os.path.exists(pkg_dir):
                    os.makedirs(pkg_dir)
                self.pkg_dir = pkg_dir

        # Copy the conda build config files to the staging directory that is
        # visible in the container
        for i, config_file in enumerate(utils.get_conda_build_config_files()):
            dst_file = self._get_config_path(self.pkg_dir, i, config_file)
            shutil.copyfile(config_file.path, dst_file)

        self._build_image(image_build_dir)
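
Given the docstring above, a hypothetical instantiation sketch; the class name RecipeBuilder is taken from the docstring's own references, and all argument values are illustrative:

# Hypothetical usage, assuming this __init__ belongs to the RecipeBuilder
# class referenced in the docstring; values are illustrative only.
builder = RecipeBuilder(
    tag='tmp-bioconda-builder',
    use_host_conda_bld=True,   # reuse the host's conda-bld as the staging dir
    keep_image=False,          # remove the custom image once building is done
)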
Code Example #34
    def _systemd(self):
        """Setup systemd configuration.

        Args:
            None

        Returns:
            None

        """
        # Initialize key variables
        username = self.switchmap_user
        groupname = grp.getgrgid(self.gid).gr_name
        system_directory = '/etc/systemd/system'
        system_command = '/bin/systemctl daemon-reload'
        poller_service = 'switchmap-ng-poller.service'
        api_service = 'switchmap-ng-api.service'

        # Do nothing if systemd isn't installed
        if os.path.isdir(system_directory) is False:
            return

        # Copy system files to systemd directory and activate
        poller_startup_script = (
            '{}/examples/linux/systemd/{}'
            ''.format(self.root_directory, poller_service))
        api_startup_script = (
            '{}/examples/linux/systemd/{}'
            ''.format(self.root_directory, api_service))

        # Read in file
        # 1) Convert home directory to that of user
        # 2) Convert username in file
        # 3) Convert group in file
        filenames = [poller_startup_script, api_startup_script]
        for filename in filenames:
            # Read next file
            with open(filename, 'r') as f_handle:
                contents = f_handle.read()

            # Substitute home directory
            contents = re.sub(
                r'/home/switchmap-ng',
                self.root_directory,
                contents)

            # Substitute username
            contents = re.sub(
                'User=switchmap-ng',
                'User={}'.format(username),
                contents)

            # Substitute group
            contents = re.sub(
                'Group=switchmap-ng',
                'Group={}'.format(groupname),
                contents)

            # Write contents
            filepath = (
                '{}/{}'.format(system_directory, os.path.basename(filename)))
            if os.path.isdir(system_directory):
                with open(filepath, 'w') as f_handle:
                    f_handle.write(contents)

        # Make systemd recognize new files
        if os.path.isdir(system_directory):
            general.run_script(system_command)

        # Enable services
        services = [poller_service, api_service]
        for service in services:
            enable_command = 'systemctl enable {}'.format(service)
            general.run_script(enable_command)
Code Example #35
File: posix_util.py  Project: AlexisMarie8330/Doll
def ValidateFilePermissionAccess(url_str, uid=NA_ID, gid=NA_ID, mode=NA_MODE):
    """Validates that the user has file access if uid, gid, and mode are applied.

    Args:
      url_str: The path to the object being validated.
      uid: A POSIX user ID.
      gid: A POSIX group ID.
      mode: A 3-digit number representing POSIX permissions; must be in base-8.

    Returns:
      A (bool, str) tuple: True if and only if it's safe to copy the file, and
      a string with details for the error otherwise.
    """
    # Windows doesn't use the POSIX system for file permissions, so all files will
    # validate.
    if IS_WINDOWS:
        return True, ''

    uid_present = uid > NA_ID
    gid_present = gid > NA_ID
    mode_present = mode > NA_MODE
    mode_valid = ValidatePOSIXMode(int(str(mode), 8))

    if not (uid_present or gid_present or mode_present):
        return True, ''

    if not mode_valid and mode_present:
        return False, 'Mode for %s won\'t allow read access.' % url_str
    elif not mode_present:
        # Calculate the default mode if the mode doesn't exist.
        # Convert mode to a 3-digit, base-8 integer.
        mode = int(GetDefaultMode())

    if uid_present:
        try:
            pwd.getpwuid(uid)
        except (KeyError, OverflowError):
            return (False,
                    'UID for %s doesn\'t exist on current system. uid: %d' %
                    (url_str, uid))
    if gid_present:
        try:
            grp.getgrgid(gid)
        except (KeyError, OverflowError):
            return (False,
                    'GID for %s doesn\'t exist on current system. gid: %d' %
                    (url_str, gid))

    # uid at this point must exist, but isn't necessarily the current user.
    # Likewise, gid must also exist at this point.
    uid_is_current_user = uid == os.getuid()

    # By this point uid and gid must exist on the system. However, the uid might
    # not match the current user's or the current user might not be a member of
    # the group identified by gid. In this case, the 'other' byte of the
    # permissions could provide sufficient access.
    mode = int(str(mode), 8)
    # Check that if the uid is not present and the gid and mode are, so that we
    # won't orphan the file. For example if the mode is set to 007, we can orphan
    # the file because the uid would default to the current user's ID and if the
    # current user wouldn't have read access or better, the file will be orphaned
    # even though they might otherwise have access through the gid or other bytes.
    if not uid_present and gid_present and mode_present and not bool(mode
                                                                     & U_R):
        return (False,
                'Insufficient access with uid/gid/mode for %s, gid: %d, '
                'mode: %s' % (url_str, gid, oct(mode)[-3:]))
    if uid_is_current_user:
        valid = bool(mode & U_R)
        return (valid, '' if valid else
                'Insufficient access with uid/gid/mode for %s, uid: %d, '
                'mode: %s' % (url_str, uid, oct(mode)[-3:]))
    elif gid in USER_GROUPS:
        valid = bool(mode & G_R)
        return (valid, '' if valid else
                'Insufficient access with uid/gid/mode for %s, gid: %d, '
                'mode: %s' % (url_str, gid, oct(mode)[-3:]))
    elif mode & O_R:
        return True, ''
    elif not uid_present and not gid_present and mode_valid:
        return True, ''
    return False, 'There was a problem validating %s.' % url_str
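
The U_R, G_R and O_R masks used above come from gsutil's posix_util module; the same read-bit tests can be written with the standard stat constants. A small self-contained sketch of that idea (not gsutil code):

import stat

def who_can_read(mode):
    """Report which of owner/group/other have the read bit set.

    mode is the permission part of st_mode, e.g. 0o640.
    """
    return {
        'owner': bool(mode & stat.S_IRUSR),  # 0o400
        'group': bool(mode & stat.S_IRGRP),  # 0o040
        'other': bool(mode & stat.S_IROTH),  # 0o004
    }

# 0o640: owner and group can read, others cannot.
print(who_can_read(0o640))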
Code Example #36
def _group_by_id(id_):
    # type: (int) -> grp.struct_group
    return grp.getgrgid(id_)
Code Example #37
from unittest.mock import patch
from datetime import date, datetime
from threading import Thread
from tempfile import TemporaryDirectory
from test import generic

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import config
import snapshots
import tools

CURRENTUID = os.geteuid()
CURRENTUSER = pwd.getpwuid(CURRENTUID).pw_name

CURRENTGID = os.getegid()
CURRENTGROUP = grp.getgrgid(CURRENTGID).gr_name

# all groups the current user is a member of
GROUPS = [i.gr_name for i in grp.getgrall() if CURRENTUSER in i.gr_mem]
NO_GROUPS = not GROUPS

IS_ROOT = os.geteuid() == 0


class TestSnapshots(generic.SnapshotsTestCase):
    ############################################################################
    ###                       takeSnapshotMessage                            ###
    ############################################################################
    def test_setTakeSnapshotMessage_info(self):
        self.sn.setTakeSnapshotMessage(0, 'first message')
        self.sn.snapshotLog.flush()
Code Example #38
def get_owner_group_names(uid, gid, cliargs):
    """This is the get owner group name function.
    It tries to get owner and group names and deals
    with uid/gid -> name caching.
    Returns owner and group.
    """

    # try to get owner user name
    # first check cache
    if uid in uids:
        owner = owners[uid]
    # not in cache
    else:
        # check if we should just get uid or try to get owner name
        if config['ownersgroups_uidgidonly'] == "true" or cliargs['crawlapi']:
            owner = uid
        else:
            try:
                # check if domain in name
                if config['ownersgroups_domain'] == "true":
                    # check if we should remove the domain from owner
                    if config['ownersgroups_keepdomain'] == "true":
                        owner = pwd.getpwuid(uid).pw_name
                    else:
                        if config['ownersgroups_domainfirst'] == "true":
                            owner = pwd.getpwuid(uid).pw_name.split(config['ownersgroups_domainsep'])[1]
                        else:
                            owner = pwd.getpwuid(uid).pw_name.split(config['ownersgroups_domainsep'])[0]
                else:
                    owner = pwd.getpwuid(uid).pw_name
            # if we can't find the owner's user name, use the uid number
            except KeyError:
                owner = uid
        # store it in cache
        if not uid in uids:
            uids.append(uid)
            owners[uid] = owner

    # try to get group name
    # first check cache
    if gid in gids:
        group = groups[gid]
    # not in cache
    else:
        # check if we should just get gid or try to get group name
        if config['ownersgroups_uidgidonly'] == "true" or cliargs['crawlapi']:
            group = gid
        else:
            try:
                # check if domain in name
                if config['ownersgroups_domain'] == "true":
                    # check if we should remove the domain from group
                    if config['ownersgroups_keepdomain'] == "true":
                        group = grp.getgrgid(gid).gr_name
                    else:
                        if config['ownersgroups_domainfirst'] == "true":
                            group = grp.getgrgid(gid).gr_name.split(config['ownersgroups_domainsep'])[1]
                        else:
                            group = grp.getgrgid(gid).gr_name.split(config['ownersgroups_domainsep'])[0]
                else:
                    group = grp.getgrgid(gid).gr_name
            # if we can't find the group's name, use the gid number
            except KeyError:
                group = gid
        # store in cache
        if not gid in gids:
            gids.append(gid)
            groups[gid] = group

    return owner, group
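
The function above keeps its own uid/gid-to-name caches in module-level lists and dicts. A compact sketch of the same caching idea using functools.lru_cache, without the domain-splitting options (not from the original project):

import functools
import grp
import pwd

@functools.lru_cache(maxsize=None)
def owner_name(uid):
    """Resolve a uid to a user name once; repeats are served from the cache."""
    try:
        return pwd.getpwuid(uid).pw_name
    except KeyError:
        return uid   # fall back to the raw uid, as the function above does

@functools.lru_cache(maxsize=None)
def group_name(gid):
    """Resolve a gid to a group name once; repeats are served from the cache."""
    try:
        return grp.getgrgid(gid).gr_name
    except KeyError:
        return gid   # fall back to the raw gid, as the function above does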
Code Example #39
File: file_utils.py  Project: vzhestkov/uyuni
    def diff(self, file_struct):
        self._validate_struct(file_struct)

        temp_file, temp_dirs = self.process(file_struct)
        path = file_struct['path']
        sectx_result = ''
        owner_result = ''
        group_result = ''
        perm_result = ''
        result = ''

        stat_err = 0

        try:
            cur_stat = os.lstat(path)
        except:
            stat_err = 1

        if file_struct['filetype'] != 'symlink':
            if not stat_err:
                #check for owner differences
                cur_uid = cur_stat[stat.ST_UID]
                try:
                    cur_user = pwd.getpwuid(cur_uid)[0]
                except KeyError:
                    # Orphan UID with no name, return unknown
                    cur_user = "unknown(UID %d)" % (cur_uid, )
            else:
                cur_user = "missing"

            if cur_user == file_struct['username']:
                owner_result = ""

            else:
                owner_result = "User name differ: actual: [%s], expected: [%s]\n" % (
                    cur_user, file_struct['username'])

            if not stat_err:
                #check for group differences
                cur_gid = cur_stat[stat.ST_GID]
                try:
                    cur_group = grp.getgrgid(cur_gid)[0]
                except KeyError:
                    #Orphan GID with no name,return unknown
                    cur_group = "unknown(GID %d)" % (cur_gid, )
            else:
                cur_group = "missing"

            if cur_group == file_struct['groupname']:
                group_result = ""
            else:
                group_result = "Group name differ: actual: [%s], expected: [%s]\n" % (
                    cur_group, file_struct['groupname'])

            #check for permissions differences
            if not stat_err:
                cur_perm = format(stat.S_IMODE(cur_stat[stat.ST_MODE]), 'o')
            else:
                cur_perm = "missing"

            #rip off the leading '0' from the mode returned by stat()
            if cur_perm[0] == '0':
                cur_perm = cur_perm[1:]

            #perm_status gets displayed with the verbose option.
            if cur_perm == str(file_struct['filemode']):
                perm_result = ""
            else:
                perm_result = "File mode differ: actual: [%s], expected: [%s]\n" % (
                    cur_perm, file_struct['filemode'])

        try:
            cur_sectx = lgetfilecon(path)[1]
        except OSError:  # workarounding BZ 690238
            cur_sectx = None

        if cur_sectx == None:
            cur_sectx = ''

        if 'selinux_ctx' in file_struct and file_struct['selinux_ctx']:
            if cur_sectx != file_struct['selinux_ctx']:
                sectx_result = "SELinux contexts differ:  actual: [%s], expected: [%s]\n" % (
                    cur_sectx, file_struct['selinux_ctx'])

        if file_struct['filetype'] == 'directory':
            if os.path.isdir(file_struct['path']):
                result = ''
            else:
                result = "Deployed directory is no longer a directory!"
        elif file_struct['filetype'] == 'symlink':
            try:
                curlink = os.readlink(path)
                newlink = os.readlink(temp_file)
                if curlink == newlink:
                    result = ''
                else:
                    result = "Link targets differ for [%s]: actual: [%s], expected: [%s]\n" % (
                        path, curlink, newlink)
            except OSError:
                e = sys.exc_info()[1]
                if e.errno == 22:
                    result = "Deployed symlink is no longer a symlink!"
                else:
                    raise e
        else:
            result = ''.join(
                diff(temp_file,
                     path,
                     display_diff=get_config('display_diff'),
                     is_binary=False
                     if file_struct['is_binary'] == 'N' else True))

        if temp_file:
            os.unlink(temp_file)
        return owner_result + group_result + perm_result + sectx_result + result
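
Each comparison above follows the same pattern: lstat the deployed path, map uid/gid to names with a placeholder fallback, and render the permission bits in octal. A condensed, hedged sketch of just that gathering step (the return layout is illustrative, not the uyuni file_struct format):

import os, stat, pwd, grp

def current_file_state(path):
    """Return (owner, group, mode) for a path, with safe fallbacks."""
    try:
        st = os.lstat(path)
    except OSError:
        return 'missing', 'missing', 'missing'
    try:
        owner = pwd.getpwuid(st.st_uid).pw_name
    except KeyError:
        owner = 'unknown(UID %d)' % st.st_uid
    try:
        group = grp.getgrgid(st.st_gid).gr_name
    except KeyError:
        group = 'unknown(GID %d)' % st.st_gid
    # format(..., 'o') yields e.g. '644' with no leading zero to strip
    mode = format(stat.S_IMODE(st.st_mode), 'o')
    return owner, group, mode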
コード例 #40
0
ファイル: unarchive.py プロジェクト: amey-git/py-net
    def is_unarchived(self):
        cmd = '%s -ZT -s "%s"' % (self.cmd_path, self.src)
        if self.excludes:
            cmd += ' -x "' + '" "'.join(self.excludes) + '"'
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except:
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except:
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                tpw = pwd.getpwnam(self.file_args['owner'])
            except:
                try:
                    tpw = pwd.getpwuid(self.file_args['owner'])
                except:
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            try:
                fut_owner = run_owner
            except:
                pass
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except:
                try:
                    tgr = grp.getgrgid(self.file_args['group'])
                except:
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            try:
                fut_group = run_group
            except:
                pass
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            pcs = line.split(None, 7)
            if len(pcs) != 8:
                # Too few fields... probably a piece of the header or footer
                continue

            # Check first and seventh field in order to skip header/footer
            if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue
            if len(pcs[6]) != 15: continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:10]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = unicode(pcs[7], 'utf-8')

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'

            # Some files may be storing FAT permissions, not Unix permissions
            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)

            # DEBUG
#            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        self.excludes.append(path)
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                if crc != self._crc32(path):
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':

                # Use the new mode provided with the action, if there is one
                if self.file_args['mode']:
                    if isinstance(self.file_args['mode'], int):
                        mode = self.file_args['mode']
                    else:
                        try:
                            mode = int(self.file_args['mode'], 8)
                        except Exception:
                            e = get_exception()
                            self.module.fail_json(path=path, msg="mode %(mode)s must be in octal form" % self.file_args, details=str(e))
                # Only special files require no umask-handling
                elif ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, umask)

                if mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
                itemized[6] = 'g'
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
                itemized[6] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG
#        out = old_out + out

        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
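
The "future owner" resolution above cascades through three attempts: the requested value as a user name, the same value as a uid, and finally the account the module runs as. A hedged standalone sketch of that cascade (helper name and exception handling are illustrative, not the Ansible module's API):

import os
import pwd

def resolve_owner(requested, run_uid=None):
    """Resolve a requested owner (name or uid) to (name, uid), falling back
    to the current user when the value cannot be resolved."""
    if run_uid is None:
        run_uid = os.getuid()
    if requested is not None:
        for lookup in (lambda v: pwd.getpwnam(str(v)),
                       lambda v: pwd.getpwuid(int(v))):
            try:
                tpw = lookup(requested)
                return tpw.pw_name, tpw.pw_uid
            except (KeyError, ValueError, TypeError):
                continue
    tpw = pwd.getpwuid(run_uid)
    return tpw.pw_name, tpw.pw_uid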
コード例 #41
0
        def dirList(path):
            """
            Create recursive list of folders and files from base path.
            The format of a node is: {"path": "/..." "data": "name", "folder":true|false, "children": []}
            """
            # Is the node valid?
            if not path:
                return False
            # Strip any trailing '/'
            if path != '/':
                path = path.rstrip('/')
            # Files cannot be listed as folders
            if os.path.isfile(path):
                return False

            len_path = len(base_path) + 1
            dlist = [] # Folders list
            flist = [] # Files list

            try:
                names = sorted(os.listdir(path), key=str.lower)
            except Exception as e:
                log.warning('*WARN* Cannot list folder `{}`: `{}`!'.format(path, e))
                return []

            # Cycle a folder
            for fname in names:
                long_path  = path + os.sep + fname
                # If filter is active and file doesn't match, ignore
                if filter and os.path.isfile(long_path) and long_path not in filter:
                    continue

                # Ignore hidden files
                if hidden and fname[0] == '.':
                    continue
                # Meta info
                try:
                    fstat = os.stat(long_path)
                    try:
                        uname = pwd.getpwuid(fstat.st_uid).pw_name
                    except Exception:
                        uname = fstat.st_uid
                    try:
                        gname = grp.getgrgid(fstat.st_gid).gr_name
                    except Exception:
                        gname = fstat.st_gid
                    meta_info = '{}|{}|{}|{}'.format(uname, gname, fstat.st_size,
                        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(fstat.st_mtime)))
                except Exception:
                    meta_info = ''

                # Semi long path
                short_path = long_path[len_path:]
                # Data to append
                nd = {'path': short_path, 'data': fname, 'meta': meta_info}

                if os.path.isdir(long_path):
                    nd['folder'] = True
                    # Recursive !
                    if recursive:
                        children = dirList(long_path)
                    else:
                        children = []
                    if children in [False, None]:
                        continue
                    nd['children'] = children
                    dlist.append(nd)
                else:
                    flist.append(nd)

            # Folders first, files second
            return dlist + flist
コード例 #42
0
            def process(path):
                s = os.lstat(path)

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                if include_owners:
                    # Group/other permissions are only relevant in pseudo context
                    add_perm(stat.S_IRGRP, 'r')
                    add_perm(stat.S_IWGRP, 'w')
                    if stat.S_ISGID & s.st_mode:
                        add_perm(stat.S_IXGRP, 's', 'S')
                    else:
                        add_perm(stat.S_IXGRP, 'x')

                    add_perm(stat.S_IROTH, 'r')
                    add_perm(stat.S_IWOTH, 'w')
                    if stat.S_ISVTX & s.st_mode:
                        update_hash('t')
                    else:
                        add_perm(stat.S_IXOTH, 'x')

                    try:
                        update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                        update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
                    except KeyError as e:
                        bb.warn("KeyError in %s" % path)
                        msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
                            "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
                        raise Exception(msg).with_traceback(e.__traceback__)

                if include_timestamps:
                    update_hash(" %10d" % s.st_mtime)

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                filterfile = False
                for entry in filemaps:
                    if fnmatch.fnmatch(path, entry):
                        filterfile = True

                update_hash(" ")
                if stat.S_ISREG(s.st_mode) and not filterfile:
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    if filterfile:
                        # Need to ignore paths in crossscripts and postinst-useradd files.
                        with open(path, 'rb') as d:
                            chunk = d.read()
                            chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
                            for entry in filemaps:
                                if not fnmatch.fnmatch(path, entry):
                                    continue
                                for r in filemaps[entry]:
                                    if r.startswith("regex-"):
                                        chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
                                    else:
                                        chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
                            fh.update(chunk)
                    else:
                        with open(path, 'rb') as d:
                            for chunk in iter(lambda: d.read(4096), b""):
                                fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")
コード例 #43
0
ファイル: test_model.py プロジェクト: clnperez/kimchi
    def test_vm_edit(self):
        config.set("authentication", "method", "pam")
        inst = model.Model(None, objstore_loc=self.tmp_store)

        orig_params = {
            'name': 'test',
            'memory': '1024',
            'cpus': '1',
            'cdrom': UBUNTU_ISO
        }
        inst.templates_create(orig_params)

        with RollbackContext() as rollback:
            params_1 = {'name': 'kimchi-vm1', 'template': '/templates/test'}
            params_2 = {'name': 'kimchi-vm2', 'template': '/templates/test'}
            inst.vms_create(params_1)
            rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete,
                                  'kimchi-vm1')
            inst.vms_create(params_2)
            rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete,
                                  'kimchi-vm2')

            vms = inst.vms_get_list()
            self.assertTrue('kimchi-vm1' in vms)

            # make sure "vm_update" works when the domain has a snapshot
            inst.vmsnapshots_create(u'kimchi-vm1')

            # update vm graphics when vm is not running
            inst.vm_update(u'kimchi-vm1', {"graphics": {"passwd": "123456"}})

            inst.vm_start('kimchi-vm1')
            rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff,
                                  'kimchi-vm1')

            vm_info = inst.vm_lookup(u'kimchi-vm1')
            self.assertEquals('123456', vm_info['graphics']["passwd"])
            self.assertEquals(None, vm_info['graphics']["passwdValidTo"])

            # update vm graphics when vm is running
            inst.vm_update(
                u'kimchi-vm1',
                {"graphics": {
                    "passwd": "abcdef",
                    "passwdValidTo": 20
                }})
            vm_info = inst.vm_lookup(u'kimchi-vm1')
            self.assertEquals('abcdef', vm_info['graphics']["passwd"])
            self.assertGreaterEqual(20, vm_info['graphics']['passwdValidTo'])

            info = inst.vm_lookup('kimchi-vm1')
            self.assertEquals('running', info['state'])

            params = {'name': 'new-vm'}
            self.assertRaises(InvalidParameter, inst.vm_update, 'kimchi-vm1',
                              params)

            # change VM users and groups while the VM is running.
            inst.vm_update(u'kimchi-vm1', {
                'users': ['root'],
                'groups': ['root']
            })
            vm_info = inst.vm_lookup(u'kimchi-vm1')
            self.assertEquals(['root'], vm_info['users'])
            self.assertEquals(['root'], vm_info['groups'])
            # change VM users and groups by removing all elements,
            # while the VM is running.
            inst.vm_update(u'kimchi-vm1', {'users': [], 'groups': []})
            vm_info = inst.vm_lookup(u'kimchi-vm1')
            self.assertEquals([], vm_info['users'])
            self.assertEquals([], vm_info['groups'])

            inst.vm_poweroff('kimchi-vm1')
            self.assertRaises(OperationFailed, inst.vm_update, 'kimchi-vm1',
                              {'name': 'kimchi-vm2'})

            params = {'name': u'пeω-∨м', 'cpus': 4, 'memory': 2048}
            inst.vm_update('kimchi-vm1', params)
            rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete,
                                  u'пeω-∨м')
            self.assertEquals(info['uuid'], inst.vm_lookup(u'пeω-∨м')['uuid'])
            info = inst.vm_lookup(u'пeω-∨м')
            for key in params.keys():
                self.assertEquals(params[key], info[key])

            # change only VM users - groups are not changed (default is empty)
            users = inst.users_get_list()[:3]
            inst.vm_update(u'пeω-∨м', {'users': users})
            self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups'])

            # change only VM groups - users are not changed (default is empty)
            groups = inst.groups_get_list()[:2]
            inst.vm_update(u'пeω-∨м', {'groups': groups})
            self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups'])

            # change VM users and groups by adding a new element to each one
            users.append(pwd.getpwuid(os.getuid()).pw_name)
            groups.append(grp.getgrgid(os.getgid()).gr_name)
            inst.vm_update(u'пeω-∨м', {'users': users, 'groups': groups})
            self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups'])

            # change VM users (wrong value) and groups
            # when an error occurs, everything fails and nothing is changed
            self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', {
                'users': ['userdoesnotexist'],
                'groups': []
            })
            self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups'])

            # change VM users and groups (wrong value)
            # when an error occurs, everything fails and nothing is changed
            self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', {
                'users': [],
                'groups': ['groupdoesnotexist']
            })
            self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups'])

            # change VM users and groups by removing all elements
            inst.vm_update(u'пeω-∨м', {'users': [], 'groups': []})
            self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['users'])
            self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups'])
コード例 #44
0
def GetFileInfo(fname, Links, MaxContentsReturnable, Checksum):
    """
    Return a dictionary of info for file.
    If 'Links' == 'follow', no link files will appear here,
    those links will be sent to GetDirInfo() as directories.
    Therefore only LStatFile is used.
    If file is link and 'Links' == 'ignore' {} is returned.
    """
    fileContentChecksum = "@{{Algoritm=%s Hash=%s Path=%s}}"

    d = {}
    if fname.endswith("omsadmin.conf"):
        return d

    if os.path.islink(fname):
        d['Type'] = 'link'
    else:
        d['Type'] = 'file'
    if d['Type'] == 'link' and Links == 'ignore':
        return {}
    stat_info = None
    stat_info = LStatFile(fname)
    if stat_info == None:
        return {}
    d['DestinationPath'] = fname
    try:
        d['Owner'] = pwd.getpwuid(stat_info.st_uid).pw_name
    except:
        d['Owner'] = str(stat_info.st_uid)
    try:
        d['Group'] = grp.getgrgid(stat_info.st_gid).gr_name
    except:
        d['Group'] = str(stat_info.st_gid)
    d['Mode'] = str(oct(stat_info.st_mode))[-3:]
    d['ModifiedDate'] = int(stat_info.st_mtime)
    d['CreatedDate'] = int(stat_info.st_ctime)
    d['FileSize'] = stat_info.st_size
    # if file size is 0
    # dont attempt to read the file
    if stat_info.st_size == 0:
        d['Contents'] = ''
        if Checksum == 'md5' or Checksum == 'sha-256':
            d['Checksum'] = ""
        elif Checksum == "ctime":
            d['Checksum'] = str(int(stat_info.st_ctime))
        else:  # Checksum == "mtime":
            d['Checksum'] = str(int(stat_info.st_mtime))
        return d

    if Checksum == 'md5' or Checksum == 'sha-256':
        fileHash = GetChecksum(fname, Checksum)
        d['Checksum'] = fileContentChecksum % (Checksum.upper(),
                                               fileHash.upper(), fname)
    elif Checksum == "ctime":
        d['Checksum'] = str(int(stat_info.st_ctime))
    else:  # Checksum == "mtime":
        d['Checksum'] = str(int(stat_info.st_mtime))
    if d['Type'] == 'link' and Links == 'manage':
        d['Contents'] = 'Symlink to ' + os.readlink(fname)
    else:
        d['Contents'], error = ReadFileLimited(fname, MaxContentsReturnable)
    if d['Contents'] is None:
        d['Contents'] = ''
    return d
コード例 #45
0
def get_file_meta(worker_name, path, cliargs, reindex_dict):
    """This is the get file meta data function.
    It scrapes file meta and ignores files smaller
    than minsize Bytes, newer than mtime
    and in excluded_files. Returns file meta dict.
    """

    try:
        filename = os.path.basename(path)

        # check if file is in exluded_files list
        extension = os.path.splitext(filename)[1][1:].strip().lower()
        if file_excluded(filename, extension, path, cliargs['verbose']):
            return None

        # use lstat to get meta and not follow sym links
        stat = os.lstat(path)
        # get file size (bytes)
        size = stat.st_size

        # Skip files smaller than minsize cli flag
        if size < cliargs['minsize']:
            return None

        # check file modified time
        mtime_unix = stat.st_mtime
        mtime_utc = \
            datetime.utcfromtimestamp(mtime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # Convert time in days (mtime cli arg) to seconds
        time_sec = cliargs['mtime'] * 86400
        file_mtime_sec = time.time() - mtime_unix
        # Only process files modified at least x days ago
        if file_mtime_sec < time_sec:
            return None

        # get access time
        atime_unix = stat.st_atime
        atime_utc = \
            datetime.utcfromtimestamp(atime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # get change time
        ctime_unix = stat.st_ctime
        ctime_utc = \
            datetime.utcfromtimestamp(ctime_unix).strftime('%Y-%m-%dT%H:%M:%S')
        # get user id of owner
        uid = stat.st_uid
        # try to get owner user name
        # first check cache
        if uid in uids:
            owner = owners[uid]
        # not in cache
        else:
            try:
                owner = pwd.getpwuid(uid).pw_name.split('\\')
                # remove domain before owner
                if len(owner) == 2:
                    owner = owner[1]
                else:
                    owner = owner[0]
            # if we can't find the owner's user name, use the uid number
            except KeyError:
                owner = uid
            # store it in cache
            if not uid in uids:
                uids.append(uid)
                owners[uid] = owner
        # get group id
        gid = stat.st_gid
        # try to get group name
        # first check cache
        if gid in gids:
            group = groups[gid]
        # not in cache
        else:
            try:
                group = grp.getgrgid(gid).gr_name.split('\\')
                # remove domain before group
                if len(group) == 2:
                    group = group[1]
                else:
                    group = group[0]
            # if we can't find the group name, use the gid number
            except KeyError:
                group = gid
            # store in cache
            if not gid in gids:
                gids.append(gid)
                groups[gid] = group
        # get inode number
        inode = stat.st_ino
        # get number of hardlinks
        hardlinks = stat.st_nlink
        # create md5 hash of file using metadata filesize and mtime
        filestring = str(size) + str(mtime_unix)
        filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest()
        # get time
        indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
        # get absolute path of parent directory
        parentdir = os.path.abspath(os.path.join(path, os.pardir))

        # create file metadata dictionary
        filemeta_dict = {
            "filename": filename,
            "extension": extension,
            "path_parent": parentdir,
            "filesize": size,
            "owner": owner,
            "group": group,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": hardlinks,
            "inode": inode,
            "filehash": filehash,
            "tag": "",
            "tag_custom": "",
            "dupe_md5": "",
            "indexing_date": indextime_utc,
            "worker_name": worker_name,
            "_type": "file"
        }

        # check plugins for adding extra meta data to filemeta_dict
        for plugin in diskover.plugins:
            try:
                # check if plugin is for file doc
                mappings = {'mappings': {'file': {'properties': {}}}}
                plugin.add_mappings(mappings)
                filemeta_dict.update(plugin.add_meta(path))
            except KeyError:
                pass

        # add any autotags to filemeta_dict
        if cliargs['autotag'] and len(diskover.config['autotag_files']) > 0:
            auto_tag(filemeta_dict, 'file', mtime_unix, atime_unix, ctime_unix)

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['file']:
            if sublist[0] == path:
                filemeta_dict['tag'] = sublist[1]
                filemeta_dict['tag_custom'] = sublist[2]
                break

    except (IOError, OSError):
        return None

    return filemeta_dict
コード例 #46
0
def list_files():
    print(request.data)
    local_files = []
    pth = request.args.get('path', '/')

    try:
        for d in os.listdir(pth):
            stats = os.stat(os.path.join(pth, d))
            local_files += [{
                "name":
                d,
                "mode":
                permissions_to_unix_name(stats),  #"drwxr-xr-x", 
                "size":
                stats.st_size,
                "uid":
                stats.st_uid,
                "user":
                pwd.getpwuid(stats.st_uid)[0],
                "gid":
                stats.st_gid,
                "group":
                grp.getgrgid(stats.st_gid)[0],
                "mtime":
                datetime.datetime.fromtimestamp(stats.st_mtime).strftime(
                    "%Y-%m-%dT%H:%M:%S")  #, "2019-12-23T05:07:26"
            }]
    except Exception as inst:
        warnings.warn(str(inst))

    data = {
        "items": [
            {
                "name": ".",
                "mode": "drwxr-xr-x",
                "size": 24576,
                "uid": 0,
                "user": "******",
                "gid": 32,
                "group": "OMVSDFG",
                "mtime": "2019-12-23T05:07:26"
            },
            {
                "name": "..",
                "mode": "dr-xr-xr-x",
                "size": 0,
                "uid": 0,
                "user": "******",
                "gid": 2,
                "group": "TTY",
                "mtime": "2019-12-23T14:38:25"
            },
            {
                "name": "dummy-dir",
                "mode": "dr-xr-xr-x",
                "size": 0,
                "uid": 0,
                "user": "******",
                "gid": 2,
                "group": "TTY",
                "mtime": "2019-12-23T14:38:25"
            },
            {
                "name": "dummy-file",
                "mode": "-rw-r--r--",
                "size": 0,
                "uid": 0,
                "user": "******",
                "gid": 32,
                "group": "OMVSDFG",
                "mtime": "2019-06-24T06:14:50"
            },
        ],
        "returnedRows":
        4,
        "totalRows":
        4,
        "JSONversion":
        1
    }

    if len(local_files) > 0:
        data['items'].extend(local_files)
        data['returnedRows'] += len(local_files)
        data['totalRows'] += data['returnedRows']

    resp = make_response(jsonify(data), 200)
    if 'Authorization' in request.headers:
        resp.set_cookie('LtpaToken2', 'fakeToken', secure=True, httponly=True)
    return resp
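
permissions_to_unix_name() is referenced here but not shown. Assuming it should produce the same 'drwxr-xr-x'-style strings as the canned entries, a minimal hedged implementation is a thin wrapper over the standard library:

import stat

def permissions_to_unix_name(stats):
    """Render an os.stat_result as an ls-style mode string, e.g. 'drwxr-xr-x'."""
    return stat.filemode(stats.st_mode)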
コード例 #47
0
def get_dir_meta(worker_name, path, cliargs, reindex_dict):
    """This is the get directory meta data function.
    It gets directory metadata and returns dir meta dict.
    It checks if meta data is in Redis and compares times
    mtime and ctime on disk compared to Redis and if same
    returns sametimes string.
    """

    try:
        lstat_path = os.lstat(path)
        mtime_unix = lstat_path.st_mtime
        mtime_utc = datetime.utcfromtimestamp(mtime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        atime_unix = lstat_path.st_atime
        atime_utc = datetime.utcfromtimestamp(atime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        ctime_unix = lstat_path.st_ctime
        ctime_utc = datetime.utcfromtimestamp(ctime_unix) \
            .strftime('%Y-%m-%dT%H:%M:%S')
        if cliargs['index2']:
            # check if directory times cached in Redis
            redis_dirtime = redis_conn.get(
                base64.encodestring(path.encode('utf-8', errors='ignore')))
            if redis_dirtime:
                cached_times = float(redis_dirtime.decode('utf-8'))
                # check if cached times are the same as on disk
                current_times = float(mtime_unix + ctime_unix)
                if cached_times == current_times:
                    return "sametimes"
        # get time now in utc
        indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
        # get user id of owner
        uid = lstat_path.st_uid
        # try to get owner user name
        # first check cache
        if uid in uids:
            owner = owners[uid]
        # not in cache
        else:
            try:
                owner = pwd.getpwuid(uid).pw_name.split('\\')
                # remove domain before owner
                if len(owner) == 2:
                    owner = owner[1]
                else:
                    owner = owner[0]
            # if we can't find the owner's user name, use the uid number
            except KeyError:
                owner = uid
            # store it in cache
            if not uid in uids:
                uids.append(uid)
                owners[uid] = owner
        # get group id
        gid = lstat_path.st_gid
        # try to get group name
        # first check cache
        if gid in gids:
            group = groups[gid]
        # not in cache
        else:
            try:
                group = grp.getgrgid(gid).gr_name.split('\\')
                # remove domain before group
                if len(group) == 2:
                    group = group[1]
                else:
                    group = group[0]
            # if we can't find the group name, use the gid number
            except KeyError:
                group = gid
            # store in cache
            if not gid in gids:
                gids.append(gid)
                groups[gid] = group

        inode = lstat_path.st_ino
        hardlinks = lstat_path.st_nlink

        filename = os.path.basename(path)
        parentdir = os.path.abspath(os.path.join(path, os.pardir))
        fullpath = os.path.abspath(os.path.join(parentdir, filename))

        dirmeta_dict = {
            "filename": filename,
            "path_parent": parentdir,
            "filesize": 0,
            "items": 1,  # 1 for itself
            "items_files": 0,
            "items_subdirs": 0,
            "last_modified": mtime_utc,
            "last_access": atime_utc,
            "last_change": ctime_utc,
            "hardlinks": hardlinks,
            "inode": inode,
            "owner": owner,
            "group": group,
            "tag": "",
            "tag_custom": "",
            "indexing_date": indextime_utc,
            "worker_name": worker_name,
            "change_percent_filesize": "",
            "change_percent_items": "",
            "change_percent_items_files": "",
            "change_percent_items_subdirs": "",
            "_type": "directory"
        }

        # check plugins for adding extra meta data to dirmeta_dict
        for plugin in diskover.plugins:
            try:
                # check if plugin is for directory doc
                mappings = {'mappings': {'directory': {'properties': {}}}}
                plugin.add_mappings(mappings)
                dirmeta_dict.update(plugin.add_meta(fullpath))
            except KeyError:
                pass

        # add any autotags to dirmeta_dict
        if cliargs['autotag'] and len(diskover.config['autotag_dirs']) > 0:
            auto_tag(dirmeta_dict, 'directory', mtime_unix, atime_unix,
                     ctime_unix)

        # search for and copy over any existing tags from reindex_dict
        for sublist in reindex_dict['directory']:
            if sublist[0] == fullpath:
                dirmeta_dict['tag'] = sublist[1]
                dirmeta_dict['tag_custom'] = sublist[2]
                break

    except (IOError, OSError):
        return None

    # cache directory times in Redis, encode path (key) using base64
    if diskover.config['redis_cachedirtimes'] == 'True' or diskover.config[
            'redis_cachedirtimes'] == 'true':
        redis_conn.set(base64.encodestring(
            path.encode('utf-8', errors='ignore')),
                       mtime_unix + ctime_unix,
                       ex=diskover.config['redis_dirtimesttl'])

    return dirmeta_dict
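
base64.encodestring() used for the Redis keys above was deprecated for years and removed in Python 3.9; base64.encodebytes() is its drop-in replacement for bytes input. A hedged sketch of the cache write on a current interpreter (the connection object is whatever redis client diskover already holds):

import base64

def cache_dir_times(conn, path, mtime_unix, ctime_unix, ttl=86400):
    """Cache mtime+ctime under a base64-encoded path key (Python 3.9+ safe)."""
    key = base64.encodebytes(path.encode('utf-8', errors='ignore'))
    conn.set(key, mtime_unix + ctime_unix, ex=ttl)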
コード例 #48
0
    def is_unarchived(self):
        # BSD unzip doesn't support zipinfo listings with timestamp.
        cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
        if self.excludes:
            cmd.extend([
                '-x',
            ] + self.excludes)
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)
        systemtype = platform.system()

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except:
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except:
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                tpw = pwd.getpwnam(self.file_args['owner'])
            except:
                try:
                    tpw = pwd.getpwuid(self.file_args['owner'])
                except:
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            try:
                fut_owner = run_owner
            except:
                pass
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except:
                try:
                    tgr = grp.getgrgid(self.file_args['group'])
                except:
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            try:
                fut_group = run_group
            except:
                pass
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            pcs = line.split(None, 7)
            if len(pcs) != 8:
                # Too few fields... probably a piece of the header or footer
                continue

            # Check first and seventh field in order to skip header/footer
            if len(pcs[0]) != 7 and len(pcs[0]) != 10:
                continue
            if len(pcs[6]) != 15:
                continue

            # Possible entries:
            #   -rw-rws---  1.9 unx    2802 t- defX 11-Aug-91 13:48 perms.2660
            #   -rw-a--     1.0 hpf    5358 Tl i4:3  4-Dec-91 11:33 longfilename.hpfs
            #   -r--ahs     1.1 fat    4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
            #   --w-------  1.0 mac   17357 bx i8:2  4-May-92 04:02 unzip.macr
            if pcs[0][0] not in 'dl-?' or not frozenset(
                    pcs[0][1:]).issubset('rwxstah-'):
                continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = to_text(pcs[7], errors='surrogate_or_strict')

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (
                        path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'

            # Some files may be storing FAT permissions, not Unix permissions
            # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set.  Otherwise, 666.
            #     This permission will then be modified by the system UMask.
            # BSD always applies the Umask, even to Unix permissions.
            # For Unix style permissions on Linux or Mac, we want to use them directly.
            #     So we set the UMask for this file to zero.  That permission set will then be unchanged when calling _permstr_to_octal

            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'
                file_umask = umask
            elif 'bsd' in systemtype.lower():
                file_umask = umask
            else:
                file_umask = 0

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' %
                                     permstr)

            # DEBUG
#            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            # Note: this timestamp calculation has a rounding error
            # somewhere... unzip and this timestamp can be one second off
            # When that happens, we report a change and re-unzip the file
            dt_object = datetime.datetime(
                *(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        self.excludes.append(path)
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (
                            path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size,
                                                                 st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                if crc != self._crc32(path):
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (
                        path, self._crc32(path), crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':

                # Use the new mode provided with the action, if there is one
                if self.file_args['mode']:
                    if isinstance(self.file_args['mode'], int):
                        mode = self.file_args['mode']
                    else:
                        try:
                            mode = int(self.file_args['mode'], 8)
                        except Exception as e:
                            try:
                                mode = AnsibleModule._symbolic_mode_to_octal(
                                    st, self.file_args['mode'])
                            except ValueError as e:
                                self.module.fail_json(
                                    path=path,
                                    msg="%s" % to_native(e),
                                    exception=traceback.format_exc())
                # Only special files require no umask-handling
                elif ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, file_umask)

                if mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (
                        path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError(
                    'Cannot change ownership of %s to %s, as user %s' %
                    (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (
                    path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (
                    path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError(
                    'Cannot change group ownership of %s to %s, as user %s' %
                    (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (
                    path, group, fut_group)
                itemized[6] = 'g'
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (
                    path, gid, fut_gid)
                itemized[6] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG
#        out = old_out + out

        return dict(unarchived=unarchived,
                    rc=rc,
                    out=out,
                    err=err,
                    cmd=cmd,
                    diff=diff)
コード例 #49
0
cli.add_argument('-d',metavar='#',help='Number of days to span (default=7)', type=int, default=7)
cli.add_argument('-e',metavar='YYYY-MM-DD',help='End date of query span, at 24:00 (default=today)', type=str, default=None)
cli.add_argument('-M',metavar='string',help='match all in job names (repeatable)', type=str, default=[], action='append')
cli.add_argument('-m',metavar='string',help='match any in job names (repeatable)', type=str, default=[], action='append')
args=cli.parse_args(sys.argv[1:])

if len(args.u)==0:
  if args.p is None:
    cli.error('At least one of -u or -p must be specified.')
  else:
    # no user defined, get all users in the project's group:
    if args.p not in project_groups:
      cli.error('Unknown project:  '+args.p)
    group=project_groups[args.p]
    gid=grp.getgrnam(group).gr_gid
    args.u.extend(grp.getgrgid(gid).gr_mem)

if args.e is not None:
  try:
    args.e=datetime.datetime.strptime(args.e,'%Y-%m-%d')
    args.e+=datetime.timedelta(days=1)
  except:
    cli.error('Invalid date format:  '+args.e)

for user in args.u:
  try:
    pwd.getpwnam(user)
  except:
    print('Unknown user: '+user)
    continue
  sq=SlurmQuery(user,args.p)
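
One caveat with expanding a project group this way: grp.getgrgid(gid).gr_mem lists only supplementary members, so accounts whose primary group is the project's group are missed. A hedged sketch that also scans primary gids:

import grp
import pwd

def group_members(group_name):
    """All usernames in a group: supplementary members plus any account
    whose primary gid matches the group's gid."""
    g = grp.getgrnam(group_name)
    members = set(g.gr_mem)
    members.update(p.pw_name for p in pwd.getpwall() if p.pw_gid == g.gr_gid)
    return sorted(members)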
コード例 #50
0
ファイル: usrgrp.py プロジェクト: afazekas/speedling
def _grp_str(group):
    if isinstance(group, numbers.Integral):
        return grp.getgrgid(group)[0]
    return group
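
A quick hedged usage example (repeating the helper so it runs standalone; output depends on the local group database):

import grp
import numbers

def _grp_str(group):
    if isinstance(group, numbers.Integral):
        return grp.getgrgid(group)[0]
    return group

print(_grp_str(0))        # typically 'root' on Linux
print(_grp_str('wheel'))  # strings pass through unchanged -> 'wheel'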
コード例 #51
0
ファイル: rpmtools.py プロジェクト: amplify-education/bcfg2
def rpm_verify_file(fileinfo, rpmlinktos, omitmask):
    """
        Verify all the files in a package.

        Returns a list of error flags, the file type and file name.  The list
        entries are strings that are the same as the labels for the bitwise
        flags used in the C code.

    """
    (fname, fsize, fmode, fmtime, fflags, frdev, finode, fnlink, fstate, \
            vflags, fuser, fgroup, fmd5) = fileinfo

    # 1. rpmtsRootDir stuff.  What does it do and where to I get it from?

    file_results = []
    flags = vflags

    # Check to see if the file was installed - if not pretend all is ok.
    # This is what the rpm C code does!
    if fstate != rpm.RPMFILE_STATE_NORMAL:
        return file_results

    # Get the installed files stats
    try:
        lstat = os.lstat(fname)
    except OSError:
        if not (fflags & (rpm.RPMFILE_MISSINGOK | rpm.RPMFILE_GHOST)):
            file_results.append('RPMVERIFY_LSTATFAIL')
            #file_results.append(fname)
        return file_results

    # 5. Contexts?  SELinux stuff?

    # Setup what checks to do.  This is straight out of the C code.
    if stat.S_ISDIR(lstat.st_mode):
        flags &= DIR_FLAGS
    elif stat.S_ISLNK(lstat.st_mode):
        flags &= LINK_FLAGS
    elif stat.S_ISFIFO(lstat.st_mode):
        flags &= FIFO_FLAGS
    elif stat.S_ISCHR(lstat.st_mode):
        flags &= CHR_FLAGS
    elif stat.S_ISBLK(lstat.st_mode):
        flags &= BLK_FLAGS
    else:
        flags &= REG_FLAGS

    if (fflags & rpm.RPMFILE_GHOST):
        flags &= GHOST_FLAGS

    flags &= ~(omitmask | RPMVERIFY_FAILURES)

    # 8. SELinux stuff.

    prelink_size = 0
    if flags & RPMVERIFY_MD5:
        prelink_md5, prelink_size = prelink_md5_check(fname)
        if prelink_md5 == False:
            file_results.append('RPMVERIFY_MD5')
            file_results.append('RPMVERIFY_READFAIL')
        elif prelink_md5 != fmd5:
            file_results.append('RPMVERIFY_MD5')

    if flags & RPMVERIFY_LINKTO:
        linkto = os.readlink(fname)
        if not linkto:
            file_results.append('RPMVERIFY_READLINKFAIL')
            file_results.append('RPMVERIFY_LINKTO')
        else:
            if len(rpmlinktos) == 0 or linkto != rpmlinktos:
                file_results.append('RPMVERIFY_LINKTO')

    if flags & RPMVERIFY_FILESIZE:
        if not (flags & RPMVERIFY_MD5):  # prelink check hasn't been done.
            prelink_size = prelink_size_check(fname)
        if (prelink_size != 0):  # This is a prelinked file.
            if (prelink_size != fsize):
                file_results.append('RPMVERIFY_FILESIZE')
        elif lstat.st_size != fsize:  # It wasn't a prelinked file.
            file_results.append('RPMVERIFY_FILESIZE')

    if flags & RPMVERIFY_MODE:
        metamode = fmode
        filemode = lstat.st_mode

        # Comparing the type of %ghost files is meaningless, but perms are ok.
        if fflags & rpm.RPMFILE_GHOST:
            metamode &= ~0xf000
            filemode &= ~0xf000

        if (stat.S_IFMT(metamode) != stat.S_IFMT(filemode)) or \
           (stat.S_IMODE(metamode) != stat.S_IMODE(filemode)):
            file_results.append('RPMVERIFY_MODE')

    if flags & RPMVERIFY_RDEV:
        if (stat.S_ISCHR(fmode) != stat.S_ISCHR(lstat.st_mode)
                or stat.S_ISBLK(fmode) != stat.S_ISBLK(lstat.st_mode)):
            file_results.append('RPMVERIFY_RDEV')
        elif (s_isdev(fmode) & s_isdev(lstat.st_mode)):
            st_rdev = lstat.st_rdev
            if frdev != st_rdev:
                file_results.append('RPMVERIFY_RDEV')

    if flags & RPMVERIFY_MTIME:
        if lstat.st_mtime != fmtime:
            file_results.append('RPMVERIFY_MTIME')

    if flags & RPMVERIFY_USER:
        try:
            user = pwd.getpwuid(lstat.st_uid)[0]
        except KeyError:
            user = None
        if not user or not fuser or (user != fuser):
            file_results.append('RPMVERIFY_USER')

    if flags & RPMVERIFY_GROUP:
        try:
            group = grp.getgrgid(lstat.st_gid)[0]
        except KeyError:
            group = None
        if not group or not fgroup or (group != fgroup):
            file_results.append('RPMVERIFY_GROUP')

    return file_results
Code example #52
File: pathlib.py Project: taol/cpython
    def group(self):
        """
        Return the group name of the file gid.
        """
        import grp
        return grp.getgrgid(self.stat().st_gid).gr_name
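
A short usage sketch for the pathlib method above (POSIX only; the path is just an example):

from pathlib import Path
print(Path('/etc/passwd').group())  # typically 'root' on Linux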
Code example #53
    def _load_devices(self, first_run=False):
        """
        Go through supported devices and load them

        Loops through the available hardware classes, loops through
        each device in the system and adds it if needs be.
        """
        if first_run:
            # Just some pretty output
            max_name_len = max(
                [len(cls.__name__) for cls in self._device_classes]) + 2
            for cls in self._device_classes:
                format_str = 'Loaded device specification: {0:-<' + str(
                    max_name_len) + '} ({1:04x}:{2:04X})'

                self.logger.debug(
                    format_str.format(cls.__name__ + ' ', cls.USB_VID,
                                      cls.USB_PID))

        if self._test_dir is not None:
            device_list = os.listdir(self._test_dir)
            test_mode = True
        else:
            device_list = list(
                self._udev_context.list_devices(subsystem='hid'))
            test_mode = False

        device_number = 0
        for device in device_list:

            for device_class in self._device_classes:
                # Interoperability between generic list of 0000:0000:0000.0000 and pyudev
                if test_mode:
                    sys_name = device
                    sys_path = os.path.join(self._test_dir, device)
                else:
                    sys_name = device.sys_name
                    sys_path = device.sys_path

                if sys_name in self._razer_devices:
                    continue

                if device_class.match(
                        sys_name, sys_path
                ):  # Check it matches sys/ ID format and has device_type file
                    self.logger.info('Found device.%d: %s', device_number,
                                     sys_name)

                    # TODO add testdir support
                    # Basically find the other usb interfaces
                    device_match = sys_name.split('.')[0]
                    additional_interfaces = []
                    if not test_mode:
                        for alt_device in device_list:
                            if device_match in alt_device.sys_name and alt_device.sys_name != sys_name:
                                additional_interfaces.append(
                                    alt_device.sys_path)

                    # Checking permissions
                    test_file = os.path.join(sys_path, 'device_type')
                    file_group_id = os.stat(test_file).st_gid
                    file_group_name = grp.getgrgid(file_group_id)[0]

                    if os.getgid(
                    ) != file_group_id and file_group_name != 'plugdev':
                        self.logger.critical(
                            "Could not access {0}/device_type, file is not owned by plugdev"
                            .format(sys_path))
                        break

                    razer_device = device_class(
                        sys_path,
                        device_number,
                        self._config,
                        testing=self._test_dir is not None,
                        additional_interfaces=sorted(additional_interfaces))

                    # Wireless devices sometimes don't listen
                    count = 0
                    while count < 3:
                        # Loop to get serial, exit early if it gets one
                        device_serial = razer_device.get_serial()
                        if len(device_serial) > 0:
                            break
                        time.sleep(0.1)
                        count += 1
                    else:
                        logging.warning(
                            "Could not get serial for device {0}. Skipping".
                            format(sys_name))
                        continue

                    self._razer_devices.add(sys_name, device_serial,
                                            razer_device)

                    device_number += 1
Code example #54
    def getContent(self):
        if self._content_mode == "full":
            try:
                st = os.stat(self._fullpath)
            except IOError:
                return "Failed To Get File Information: %s" % (self._fullpath)
            else:
                file_size = st[ST_SIZE]
                file_mtime = time.asctime(time.localtime(st[ST_MTIME]))
                file_ctime = time.asctime(time.localtime(st[ST_CTIME]))
                file_atime = time.asctime(time.localtime(st[ST_ATIME]))

                src = self.getURL(self._relpath,
                                  content_mode="raw",
                                  thumbnail="medium")
                href = self.getURL(self._relpath, content_mode="raw")
                downlink = self.getURL(self._relpath,
                                       content_mode="raw",
                                       download="true")

                xmlroot = etree.Element('{%s}image' % self._namespace_uri,
                                        nsmap=self.nsmap,
                                        name=os.path.basename(self._relpath),
                                        src=src,
                                        href=href,
                                        resurl=self._web_support.resurl,
                                        downlink=downlink)

                xmlchild = etree.SubElement(xmlroot,
                                            "filename",
                                            nsmap=self.nsmap)
                xmlchild.text = os.path.basename(self._fullpath)

                xmlchild = etree.SubElement(xmlroot, "path", nsmap=self.nsmap)
                xmlchild.text = os.path.dirname(self._fullpath)

                xmlchild = etree.SubElement(xmlroot,
                                            "filesize",
                                            nsmap=self.nsmap)
                xmlchild.text = self.ConvertUserFriendlySize(file_size)

                xmlchild = etree.SubElement(xmlroot, "mtime", nsmap=self.nsmap)
                xmlchild.text = file_mtime

                xmlchild = etree.SubElement(xmlroot, "ctime", nsmap=self.nsmap)
                xmlchild.text = file_ctime

                xmlchild = etree.SubElement(xmlroot, "atime", nsmap=self.nsmap)
                xmlchild.text = file_atime

                # File Permissions
                xmlchild = etree.SubElement(xmlroot,
                                            "permissions",
                                            nsmap=self.nsmap)
                xmlchild.text = self.ConvertUserFriendlyPermissions(
                    st[ST_MODE])

                # User and Group
                if platform.system() == "Linux":
                    try:
                        username = pwd.getpwuid(st[ST_UID])[0]
                    except KeyError:
                        username = ""
                    groupname = grp.getgrgid(st[ST_GID])[0]
                    xmlchild = etree.SubElement(xmlroot,
                                                "owner",
                                                nsmap=self.nsmap)
                    xmlchild.text = "%s:%s" % (username, groupname)
                try:
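                    # Two libmagic bindings exist in the wild: the ctypes
                    # "file-magic" API (magic.open) and python-magic
                    # (magic.from_file); the AttributeError fallback below
                    # handles whichever one is installed.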
                    magicstore = magic.open(magic.MAGIC_MIME)
                    magicstore.load()
                    contenttype = magicstore.file(
                        os.path.realpath(self._fullpath)
                    )  # real path to resolve symbolic links outside of dataroot
                except AttributeError:
                    contenttype = magic.from_file(os.path.realpath(
                        self._fullpath),
                                                  mime=True)
                if contenttype is None:
                    contenttype = "text/plain"
                xmlchild = etree.SubElement(xmlroot,
                                            "contenttype",
                                            nsmap=self.nsmap)
                xmlchild.text = contenttype

                img = Image.open(self._fullpath)
                xmlchild = etree.SubElement(xmlroot,
                                            "imgsize",
                                            nsmap=self.nsmap)
                xmlchild.text = "%s x %s pixels" % img.size
                xmlchild = etree.SubElement(xmlroot,
                                            "imgmode",
                                            nsmap=self.nsmap)
                xmlchild.text = img.mode

                pmdfile = os.path.splitext(self._fullpath)[0] + '.pmd'
                if os.access(pmdfile, os.R_OK) and os.path.exists(pmdfile):
                    f = open(pmdfile, 'r')
                    pmddoc = etree.parse(f)
                    f.close()
                    pmdroot = pmddoc.getroot()
                    xmlroot.append(pmdroot)
                    pass

                f = open(self._fullpath, 'rb')
                exiftags = EXIF.process_file(f)
                f.seek(0)
                ricohtags = RMETA.process_file(f)
                f.close()

                xmlchild = etree.SubElement(xmlroot,
                                            "exiftags",
                                            nsmap=self.nsmap)
                x = exiftags.keys()
                x.sort()
                for tag in x:
                    if tag in ('JPEGThumbnail', 'TIFFThumbnail'):
                        continue
                    newxmltag = etree.SubElement(xmlchild,
                                                 "tag",
                                                 nsmap=self.nsmap,
                                                 name=tag)
                    tagtext = exiftags[tag].printable
                    if not isinstance(tagtext, basestring):
                        newxmltag.text = repr(tagtext)
                    else:
                        newxmltag.text = tagtext
                    pass

                xmlchild = etree.SubElement(xmlroot,
                                            "ricohtags",
                                            nsmap=self.nsmap)
                for tag in ricohtags:
                    newxmltag = etree.SubElement(xmlchild,
                                                 "tag",
                                                 nsmap=self.nsmap,
                                                 name=str(tag))
                    tagtext = ricohtags[tag]
                    if not isinstance(tagtext, basestring):
                        newxmltag.text = repr(tagtext)
                    else:
                        newxmltag.text = tagtext
                    pass

                return xmlroot
        elif self._content_mode == "summary" or self._content_mode == "title":
            link = self.getURL(self._relpath)
            src = self.getURL(self._relpath,
                              content_mode="raw",
                              thumbnail="gallery")
            href = self.getURL(self._relpath, content_mode="raw")
            downlink = self.getURL(self._relpath,
                                   content_mode="raw",
                                   download="true")
            xmlroot = etree.Element('{%s}image' % self._namespace_uri,
                                    nsmap=self.nsmap,
                                    name=os.path.basename(self._relpath),
                                    link=link,
                                    src=src,
                                    href=href,
                                    downlink=downlink)
            return xmlroot
        elif self._content_mode == "raw":
            try:
                magicstore = magic.open(magic.MAGIC_MIME)
                magicstore.load()
                contenttype = magicstore.file(
                    os.path.realpath(self._fullpath)
                )  # real path to resolve symbolic links outside of dataroot
            except AttributeError:
                contenttype = magic.from_file(os.path.realpath(self._fullpath),
                                              mime=True)
            if contenttype is None:
                contenttype = "text/plain"
            if "thumbnail" in self._web_support.req.form:
                ext = os.path.splitext(self._fullpath)[1]
                if ext == '.tif' or ext == '.tiff':
                    ext = '.png'
                    contenttype = 'image/png'
                if self._web_support.req.form['thumbnail'].value == "small":
                    tagname = "small"
                    newsize = (150, 150)
                elif self._web_support.req.form['thumbnail'].value == "medium":
                    tagname = "medium"
                    newsize = (300, 300)
                elif self._web_support.req.form['thumbnail'].value == "large":
                    tagname = "large"
                    newsize = (500, 500)
                elif self._web_support.req.form[
                        'thumbnail'].value == "gallery":
                    tagname = "gallery"
                    newsize = (201, 201)
                else:
                    tagname = "small"
                    newsize = (150, 150)
                if self.CacheFileExists(tagname, extension=ext):
                    size = os.path.getsize(
                        self.getCacheFileName(tagname, extension=ext))
                    f = self.getCacheFileHandler('rb', tagname, extension=ext)
                    self._web_support.req.response_headers[
                        'Content-Type'] = contenttype
                    self._web_support.req.response_headers[
                        'Content-Length'] = str(size)
                    self._web_support.req.start_response(
                        self._web_support.req.status,
                        self._web_support.req.response_headers.items())
                    self._web_support.req.output_done = True
                    if 'wsgi.file_wrapper' in self._web_support.req.environ:
                        return self._web_support.req.environ[
                            'wsgi.file_wrapper'](f, 1024)
                    else:
                        return iter(lambda: f.read(1024), '')
                else:
                    img = Image.open(self._fullpath)
                    format = img.format
                    img.thumbnail(newsize, Image.ANTIALIAS)
                    output = StringIO.StringIO()
                    img.save(output, format=format)
                    f = self.getCacheFileHandler('wb', tagname, extension=ext)
                    img.save(f)
                    f.close()
                    self._web_support.req.response_headers[
                        'Content-Type'] = contenttype
                    self._web_support.req.start_response(
                        self._web_support.req.status,
                        self._web_support.req.response_headers.items())
                    self._web_support.req.output_done = True
                    return [output.getvalue()]
            else:
                size = os.path.getsize(self._fullpath)
                f = open(self._fullpath, "rb")
                self._web_support.req.response_headers[
                    'Content-Type'] = contenttype
                self._web_support.req.response_headers['Content-Length'] = str(
                    size)
                if "download" in self._web_support.req.form:
                    self._web_support.req.response_headers[
                        'Content-Disposition'] = "attachment; filename=" + os.path.basename(
                            self._fullpath)
                self._web_support.req.start_response(
                    self._web_support.req.status,
                    self._web_support.req.response_headers.items())
                self._web_support.req.output_done = True
                if 'wsgi.file_wrapper' in self._web_support.req.environ:
                    return self._web_support.req.environ['wsgi.file_wrapper'](
                        f, 1024)
                else:
                    return iter(lambda: f.read(1024), '')
        else:
            raise self.RendererException("Invalid Content Mode")
        pass
Code example #55
def groupowner(path): #returns group owner of the file or directory
  stat_info = os.stat(path)
  gid = stat_info.st_gid
  group = grp.getgrgid(gid)[0]
  return group
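
A minimal call sketch (the path is only illustrative; any readable path works):

print(groupowner('/etc/passwd'))  # typically 'root' on Linux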
Code example #56
File: common.py Project: xuande/pydoop
def get_groups(user=DEFAULT_USER):
    groups = set(_.gr_name for _ in grp.getgrall() if user in set(_.gr_mem))
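    # gr_mem only lists supplementary members, so the primary group is added below.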
    primary_gid = pwd.getpwnam(user).pw_gid
    groups.add(grp.getgrgid(primary_gid).gr_name)
    return groups
Code example #57
    def get_file_struct(self, fs_path, new_level=False):
        """Generate file meta data from file abs path.

        Return the meta data as a dict structure and a binary string

        :param fs_path: file abs path
        :param new_level:
        :return: file data structure
        """

        # Get file inode information, whether the file is a regular
        # file or a symbolic link
        try:
            os_stat = os.lstat(fs_path)
        except (OSError, IOError) as error:
            raise Exception('[*] Error on file stat: {}'.format(error))

        file_mode = os_stat.st_mode
        # Get file type. If file type is a link it returns also the
        # file pointed by the link
        file_type, lname = self.get_file_type(file_mode, fs_path)

        # If file_type is a socket return False
        if file_type == 's':
            return False, False

        ctime = int(os_stat.st_ctime)
        mtime = int(os_stat.st_mtime)
        uname = pwd.getpwuid(os_stat.st_uid)[0]
        gname = grp.getgrgid(os_stat.st_gid)[0]

        dev = os_stat.st_dev
        inumber = os_stat.st_ino
        nlink = os_stat.st_nlink
        uid = os_stat.st_uid
        gid = os_stat.st_gid
        size = os_stat.st_size
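        # st_dev identifies the device the file lives on; split it into major/minor numbers.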
        devmajor = os.major(dev)
        devminor = os.minor(dev)

        level_id = '0000'
        if new_level:
            level_id = '1111'

        # build file meta data as dictionary
        inode_dict = {
            'inode': {
                'inumber': inumber,
                'nlink': nlink,
                'mode': file_mode,
                'uid': uid,
                'gid': gid,
                'size': size,
                'devmajor': devmajor,
                'devminor': devminor,
                'mtime': mtime,
                'ctime': ctime,
                'uname': uname,
                'gname': gname,
                'ftype': file_type,
                'lname': lname,
                'rsync_block_size': RSYNC_BLOCK_SIZE,
                'level_id': level_id,
                'deleted': '0000'
            }
        }

        # build file meta data as binary string
        inode_bin_str = (b'{}\00{}\00{}\00{}\00{}'
                         b'\00{}\00{}\00{}\00{}\00{}'
                         b'\00{}\00{}\00{}\00{}\00{}\00{}\00{}\00{}').format(
                             RSYNC_DATA_STRUCT_VERSION, file_mode, uid, gid,
                             size, mtime, ctime, uname, gname, file_type,
                             lname, inumber, nlink, devminor, devmajor,
                             RSYNC_BLOCK_SIZE, level_id, '0000')

        return inode_dict, inode_bin_str
Code example #58
def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True),
            follow=dict(type='bool', default=False),
            get_md5=dict(type='bool'),
            get_checksum=dict(type='bool', default=True),
            get_mime=dict(type='bool',
                          default=True,
                          aliases=['mime', 'mime_type', 'mime-type']),
            get_attributes=dict(type='bool',
                                default=True,
                                aliases=['attr', 'attributes']),
            checksum_algorithm=dict(type='str',
                                    default='sha1',
                                    choices=[
                                        'md5', 'sha1', 'sha224', 'sha256',
                                        'sha384', 'sha512'
                                    ],
                                    aliases=['checksum', 'checksum_algo']),
        ),
        supports_check_mode=True,
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_md5 = module.params.get('get_md5')

    # get_md5 will be an undocumented option in 2.9 to be removed at a later
    # date if possible (3.0+)
    if get_md5:
        module.deprecate(
            "get_md5 has been deprecated along with the md5 return value, use "
            "get_checksum=True and checksum_algorithm=md5 instead", 2.9)
    else:
        get_md5 = False
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # main stat data
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)

        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK),
                 ('executable', os.X_OK)]:
        output[perm[0]] = os.access(b_path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(b_path)
        output['lnk_target'] = os.readlink(b_path)

    try:  # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except (TypeError, KeyError):
        pass

    try:  # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except (KeyError, ValueError, OverflowError):
        pass

    # checksums
    if output.get('isreg') and output.get('readable'):
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(b_path)
            except ValueError:
                output['md5'] = None

        if get_checksum:
            output['checksum'] = module.digest_from_file(
                b_path, checksum_algorithm)

    # try to get mime data if requested
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '-i', b_path]
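            # `file -i` prints "<path>: <mimetype>; charset=<charset>",
            # which the split calls below take apart.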
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    mimetype, charset = out.split(':')[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except Exception:
                pass

    # try to get attr data
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(b_path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)
Code example #59
File: system_pi.py Project: kizniche/Mycodo
    def getgroups(user):
        gids = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
        gid = pwd.getpwnam(user).pw_gid
        if grp.getgrgid(gid).gr_gid not in gids:
            gids.append(int(grp.getgrgid(gid).gr_gid))
        return gids
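
A hedged call sketch for the helper above (the account name 'pi' is only an example and must exist locally; the helper may need to be reached through its containing class):

print(getgroups('pi'))  # a list of numeric group IDs for the user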
Code example #60
File: configurator.py Project: nodeg/koan
    def configure_files(self):
        """ Configure file resources."""
        print("- Configuring Files")
        runtime_start = time.time()
        nsync = 0
        osync = 0
        fail = 0
        files = self.config['files']
        # Split out files
        _files = [f for f in files if files[f]['is_dir'] is False]

        for file in _files:
            action = files[file]['action']
            ofile = files[file]['path']

            if action == 'create':
                nmode = int(files[file]['mode'], 8)
                nuid = pwd.getpwnam(files[file]['owner'])[2]
                ngid = grp.getgrnam(files[file]['group'])[2]

                # Stage a tempfile to hold new file contents
                _tempfile = tempfile.NamedTemporaryFile()
                _tempfile.write(files[file]['content'])
                _tempfile.flush()
                nfile = _tempfile.name

                # Compare new and old files, sync if permissions or contents
                # mismatch
                if os.path.isfile(ofile):
                    fstat = os.stat(ofile)
                    omode = stat.S_IMODE(fstat.st_mode)
                    ouid = pwd.getpwuid(fstat.st_uid)[2]
                    ogid = grp.getgrgid(fstat.st_gid)[2]
                    if not filecmp.cmp(
                            ofile, nfile
                    ) or omode != nmode or ogid != ngid or ouid != nuid:
                        utils.sync_file(ofile, nfile, nuid, ngid, nmode)
                        osync += 1
                    else:
                        nsync += 1
                elif os.path.dirname(ofile):
                    # Create the file only if the base directory exists
                    open(ofile, 'w').close()
                    utils.sync_file(ofile, nfile, nuid, ngid, nmode)
                    osync += 1
                else:
                    print("  Base directory not found, %s required." %
                          (os.path.dirname(ofile)))
                    fail += 1
                _tempfile.close()
            elif action == 'remove':
                if os.path.isfile(ofile):
                    os.remove(ofile)
                    osync += 1
                else:
                    nsync += 1
            else:
                pass

        runtime_end = time.time()
        runtime = (runtime_end - runtime_start)
        self.stats['files'] = {
            'runtime': runtime,
            'nsync': nsync,
            'osync': osync,
            'fail': fail
        }