コード例 #1
0
ファイル: logger.py プロジェクト: afeset/miner2-tools
    def logEnv(self, header=None):
        """Collect and log various environment info such as hostname and arguments.

        header -- optional string logged alongside the program name.
        """
        # Program identification (progName/progPath/progAbsPath come from the
        # project's `path` helper module).
        if header:
            self.debug("Logger-env: prog=%s header='%s'" % (path.progName(), header)  )

        self.debug("Logger-env: progPath=%s"     % path.progPath()  )
        self.debug("Logger-env: progAbsPath=%s"  % path.progAbsPath()  )
        self.debug("Logger-env: date=%s"         % time.asctime()    )
        self.debug("Logger-env: argv=%s"         % str(sys.argv)     )
        self.debug("Logger-env: cwd=%s"          % str(os.getcwd())  )

        # Real and effective user/group ids, resolved to names via pwd/grp.
        uid = os.getuid()    ;  euid = os.geteuid()
        gid = os.getgid()    ;  egid = os.getegid()
        # BUG FIX: label was "guid" but the logged value is the real group id.
        self.debug("Logger-env: uid=%s(%d) euid=%s(%d) gid=%s(%d) egid=%s(%d)" %
                    ( pwd.getpwuid(uid)[0], uid, pwd.getpwuid(euid)[0], euid,
                      grp.getgrgid(gid)[0], gid, grp.getgrgid(egid)[0], egid ) )

        self.debug("Logger-env: pid=%d  ppid=%d  pgpid=%d"  % ( os.getpid(), os.getppid(), os.getpgrp() ) )
        self.debug("Logger-env: hostname=%s  uname=%s"      % ( socket.gethostname(), str(os.uname())   ) )

        self.debug("Logger-env: pythonVersion=%s" % sys.version.replace("\n","")  )
        self.debug("Logger-env: pythonPath=%s"    % str(sys.path)  )

        # Additional info at lower level: full environment dump, sorted by key.
        for v in sorted(os.environ.keys()):
            self.debug("Logger-env:     os.environ[%s]=%s"  % (v, os.environ[v]) )
コード例 #2
0
ファイル: util.py プロジェクト: Web5design/SublimeLinter3
def create_tempdir():
    """Create a directory within the system temp directory used to create temp files."""
    # NOTE: `tempdir` is a module-level path and `sublime` is the Sublime Text
    # API module; both are defined elsewhere in this project.
    try:
        # Start from a clean slate: remove any stale directory first.
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)

        os.mkdir(tempdir)

        # Make sure the directory can be removed by anyone in case the user
        # runs ST later as another user.
        os.chmod(tempdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    except PermissionError:
        # The directory belongs to someone else; tell the user how to fix it.
        if sublime.platform() != 'windows':
            current_user = pwd.getpwuid(os.geteuid())[0]
            temp_uid = os.stat(tempdir).st_uid
            temp_user = pwd.getpwuid(temp_uid)[0]
            message = (
                'The SublimeLinter temp directory:\n\n{0}\n\ncould not be cleared '
                'because it is owned by \'{1}\' and you are logged in as \'{2}\'. '
                'Please use sudo to remove the temp directory from a terminal.'
            ).format(tempdir, temp_user, current_user)
        else:
            # pwd is unavailable on Windows, so only the path can be reported.
            message = (
                'The SublimeLinter temp directory ({}) could not be reset '
                'because it belongs to a different user.'
            ).format(tempdir)

        sublime.error_message(message)

    # Imported here to avoid a circular import at module load time.
    from . import persist
    persist.debug('temp directory:', tempdir)
コード例 #3
0
ファイル: test_memory_cache.py プロジェクト: 3van/sssd
def test_removed_mc(ldap_conn, sanity_rfc2307):
    """
    Regression test for ticket:
    https://fedorahosted.org/sssd/ticket/2726
    """

    # Prime both lookup directions so the entries land in sssd's memory cache.
    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # remove cache without invalidation
    for path in os.listdir(config.MCACHE_PATH):
        os.unlink(config.MCACHE_PATH + "/" + path)

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
コード例 #4
0
ファイル: ca-make.py プロジェクト: joshuacoddingyou/python
def CheckedMakeDir(dirname,perms=0,uid=0,gid=0):
    """Ensure `dirname` exists, then optionally adjust its mode and ownership.

    A falsy dirname is a no-op.  perms of 0 leaves the mode untouched;
    uid/gid of 0 leave ownership untouched (gid falls back to the owner's
    primary group from the passwd database when only uid is given).
    Progress and warnings are written to stdout/stderr.
    """
    if not dirname:
        return

    if os.path.exists(dirname):
        # Path already present: only warn when it is not actually a directory.
        if not os.path.isdir(dirname):
            sys.stderr.write('Warning: %s already exists but is no directory.\n' % (dirname))
    else:
        try:
            os.makedirs(dirname)
        except OSError:
            sys.stderr.write('Error: Could not create directory %s.\n' % (dirname))
            return
        sys.stdout.write('Created directory %s\n' % (dirname))

    # Snapshot ownership before any changes.
    fstat = os.stat(dirname)

    if perms:
        os.chmod(dirname,perms)
        sys.stdout.write('Changed permissions of %s to %o\n' % (dirname,perms))

    owner_differs = uid and fstat[stat.ST_UID] != uid
    group_differs = gid and fstat[stat.ST_GID] != gid
    if owner_differs or group_differs:
        # Fill in whichever id was left at 0.
        uid = uid or fstat[stat.ST_UID]
        gid = gid or pwd.getpwuid(uid)[3]
        os.chown(dirname,uid,gid)
        sys.stdout.write('Changed owner/group of %s to %s.%s\n' % (dirname,pwd.getpwuid(uid)[0],grp.getgrgid(gid)[0]))
コード例 #5
0
ファイル: env.py プロジェクト: Svedrin/spectrum
def check_writable(node, uid=None):
    """Check that `node` is writable by the given user (default: current user).

    Raises RuntimeError with a suggested `chmod` command when the relevant
    write (and, for directories, execute) permission bit is missing.
    """
    if uid is None:
        user = pwd.getpwuid(get_uid())
    else:
        user = pwd.getpwuid(uid)

    s = os.stat(node)
    # BUG FIX: collect group *ids*, not grp struct entries.  The original
    # appended struct_group objects plus one int, so the `s.st_gid in groups`
    # test below could only ever match the user's primary group.
    groups = [g.gr_gid for g in grp.getgrall() if user.pw_name in g.gr_mem]
    groups.append(user.pw_gid)
    cmd = ''
    if s.st_uid == user.pw_uid:
        # User owns the file: check the owner bits.
        if s.st_mode & stat.S_IWUSR == 0:
            cmd = 'chmod u+w %s' % (node)
        if os.path.isdir(node) and s.st_mode & stat.S_IXUSR == 0:
            cmd = 'chmod u+wx %s' % (node)
    elif s.st_gid in groups:
        # File's group is one of the user's groups: check the group bits.
        if s.st_mode & stat.S_IWGRP == 0:
            cmd = 'chmod g+w %s' % (node)
        if os.path.isdir(node) and s.st_mode & stat.S_IXGRP == 0:
            cmd = 'chmod g+wx %s' % (node)
    else:
        # Unrelated user: only the "other" bits apply.
        if s.st_mode & stat.S_IWOTH == 0:
            cmd = 'chmod o+w %s' % (node)
        if os.path.isdir(node) and s.st_mode & stat.S_IXOTH == 0:
            cmd = 'chmod o+wx %s' % (node)

    if cmd != '':
        raise RuntimeError(node, 'Not writable (fix with %s)' % (cmd))
コード例 #6
0
def get_username():
    """Return the user's full name from the GECOS field, or the login name.

    The first comma-separated GECOS field conventionally holds the full
    name; when it is empty, fall back to the account's login name.
    """
    entry = pwd.getpwuid(os.getuid())
    full_name = entry[4].split(',')[0]
    if not full_name:
        return entry[0]
    return full_name
コード例 #7
0
ファイル: util.py プロジェクト: PaulNendick/circus
    def to_uid(name):  # NOQA
        """Return an uid, given a user name.
        If the name is an integer, make sure it's an existing uid.

        If the user name is unknown, raises a ValueError.
        """
        # Numeric strings are treated as candidate uids.
        try:
            name = int(name)
        except ValueError:
            pass

        if not isinstance(name, int):
            from circus.py3compat import string_types  # circular import fix

            if not isinstance(name, string_types):
                raise TypeError(name)
            # Resolve the login name through the passwd database.
            try:
                return pwd.getpwnam(name).pw_uid
            except KeyError:
                raise ValueError("%r isn't a valid user name" % name)

        # Integer path: verify the uid actually exists before returning it.
        try:
            pwd.getpwuid(name)
        except KeyError:
            raise ValueError("%r isn't a valid user id" % name)
        return name
コード例 #8
0
ファイル: start.py プロジェクト: durandj/mymcadmin
    def _convert_user(user):
        """Resolve `user` (numeric uid or login name) to a numeric uid.

        A numeric uid is validated against the passwd database (KeyError
        if unknown); a name is looked up and its uid returned.
        """
        if not isinstance(user, int):
            # Name given: look up its uid.
            return pwd.getpwnam(user).pw_uid

        # Uid given: verify it exists, then hand it back unchanged.
        pwd.getpwuid(user)
        return user
コード例 #9
0
ファイル: vertexdb_fs.py プロジェクト: hassy/fuse-vertexdb
    def getattr(self, path, fh):
        # FUSE getattr handler: build a stat-like dict for `path` backed by
        # the vertexdb store.  Python 2 code (octal literals 0766 / 0666).
        now = time() # FIXME:
        # NOTE(review): pwd.getpwuid(os.getuid()).pw_uid is just os.getuid();
        # kept as-is to avoid behavior drift.
        uid = pwd.getpwuid(os.getuid()).pw_uid
        gid = pwd.getpwuid(os.getuid()).pw_gid
        if self.vdb.is_dir(path):
            try:
                size = self.vdb.size(path)
            except:
                # Backend lookup failed -> report "no such entry" to FUSE.
                raise OSError(ENOENT, "")

            # st_nlink semantics differ per platform: Linux counts the
            # "." and ".." entries in addition to the children.
            if platform.system() == "Darwin":
                st_nlink = size
            elif platform.system() == "Linux":
                st_nlink = size + 2
                
            return dict(st_mode=(S_IFDIR|0766), st_ctime=now, st_mtime=now, st_atime=now, st_nlink=st_nlink, st_uid=uid, st_gid=gid)
        else:
            try:
                data = self.vdb.read(path)
            except:
                raise OSError(ENOENT, "")

            # The backend stores the literal string "null" for absent nodes.
            if data == "null":
                raise OSError(ENOENT, "")
                
            return dict(st_mode=(S_IFREG|0666), st_size=len(data), st_ctime=now, st_mtime=now, st_atime=now, st_nlink=1, st_uid=uid, st_gid=gid)
コード例 #10
0
ファイル: roundup_server.py プロジェクト: AnishShah/roundup
def setuid(user):
    # Drop root privileges to `user` (login name or numeric uid string).
    # Python 2 code: uses the `raise Exc, arg` and `print` statement forms.
    if not hasattr(os, 'getuid'):
        # Non-POSIX platform: nothing to do.
        return

    # People can remove this check if they're really determined
    if user is None:
        if os.getuid():
            return
        raise ValueError, _("Can't run as root!")

    if os.getuid():
        # Only root may switch users: warn and keep running as-is.
        print _('WARNING: ignoring "-u" argument, not root')
        return

    try:
        import pwd
    except ImportError:
        raise ValueError, _("Can't change users - no pwd module")
    try:
        try:
            uid = int(user)
        except ValueError:
            # Not numeric: resolve the login name to its uid.
            uid = pwd.getpwnam(user)[2]
        else:
            # Numeric: verify the uid exists before switching to it.
            pwd.getpwuid(uid)
    except KeyError:
        raise ValueError, _("User %(user)s doesn't exist")%locals()
    os.setuid(uid)
コード例 #11
0
ファイル: configread.py プロジェクト: JumpeiArashi/blackbird
def is_user(value, min=None, max=None):
    """Validate that `value` identifies an existing user.

    A str is resolved via pwd.getpwnam and the matching uid is returned;
    an int is checked via pwd.getpwuid and returned unchanged.  Unknown
    users raise validate.VdtValueError, and any other type raises
    validate.VdtTypeError.  `min`/`max` exist for configobj-checker
    signature compatibility and are unused.
    """
    if type(value) == str:
        try:
            value = pwd.getpwnam(value).pw_uid
        except KeyError:
            raise validate.VdtValueError('{0}: No such user.'.format(value))
        return value

    if type(value) == int:
        try:
            pwd.getpwuid(value)
        except KeyError:
            raise validate.VdtValueError('{0}: No such user.'.format(value))
        return value

    raise validate.VdtTypeError('Please, use str or int to "user" parameter.')
コード例 #12
0
    def testStartCopy(self):
        # End-to-end check of Autocopy.start_copy: a file written into a
        # source run directory must arrive under the destination run root.
        run_name = '000000_RUNDIR_1234_ABCDEFG'
        source_run_root = os.path.join(self.run_root, 'source')
        source_rundir = os.path.join(source_run_root, run_name)
        os.makedirs(source_rundir)
        testfile = 'test.txt'
        with open(os.path.join(source_rundir, testfile), 'w') as f:
            f.write("Hello")

        # Copy to ourselves over localhost: destination user/group are the
        # current user's login name and primary group name.
        dest_run_root = os.path.join(self.run_root, 'dest')
        dest_host = 'localhost'
        dest_group = grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
        dest_user = pwd.getpwuid(os.getuid()).pw_name
        os.makedirs(dest_run_root)

        config = {
            'COPY_DEST_HOST': dest_host,
            'COPY_DEST_USER': dest_user,
            'COPY_DEST_GROUP': dest_group,
            'COPY_DEST_RUN_ROOT': dest_run_root,
            'COPY_SOURCE_RUN_ROOTS': [source_run_root],
        }

        # Initialize autocopy and create the source root
        a = Autocopy(log_file=self.tmp_file.name, no_email=True, test_mode_lims=True, config=config, errors_to_terminal=DEBUG)
        a.update_rundirs_monitored()
        rundir = a.get_rundir(dirname=run_name)
        a.start_copy(rundir)
        # Block until the copy subprocess finishes; exit code 0 == success.
        retcode = rundir.copy_proc.wait()
        self.assertEqual(retcode, 0)

        with open(os.path.join(dest_run_root, run_name, testfile), 'r') as f:
            text = f.read()
        self.assertTrue(re.search("Hello", text))
        a.cleanup()
コード例 #13
0
ファイル: shika_daemon.py プロジェクト: keitaroyam/yamtbx
    def GET(self, uid, gid):
        """Restart the monitored program under the given uid/gid.

        Responds as plain text, streaming progress lines.  A forked child
        drops privileges via change_user(), launches EXEC_SH and writes the
        spawned pid back through a pipe; the parent records it in PID_FILE.
        """
        web.header("Content-type","text/plain")
        uid, gid = map(int, (uid, gid))

        # Security: refuse system accounts (uid/gid below 500).
        if uid < 500 or gid < 500:
            yield "Invalid UID (%d) or GID (%d)\n" % (uid, gid)
            return

        try: pwd.getpwuid(uid)
        except KeyError:
            # BUG FIX: the original formatted the (uid, gid) tuple into a
            # single %d, which raised TypeError instead of replying.
            yield "UID (%d) does not exist\n" % uid
            return

        for k in stop_program().GET(): yield k
        yield "Starting program with %d/%d\n" % (uid, gid)
        #p = subprocess.Popen(EXEC_SH, shell=True,
        #                     preexec_fn=lambda: change_user(uid,gid)) # This fails when running as daemon

        rpipe, wpipe = os.pipe() # Reference: http://ameblo.jp/oyasai10/entry-10615215673.html
        pid = os.fork()
        if pid == 0: # Child
            os.close(rpipe)
            wpipe = os.fdopen(wpipe, "w")
            change_user(uid,gid)
            p = subprocess.Popen(EXEC_SH, shell=True)
            wpipe.write("%d\n"%p.pid)
            sys.exit()
        else: # Parent
            os.close(wpipe)
            rpipe = os.fdopen(rpipe, "r")
            # First line from the child is the launched program's pid.
            pid = int(rpipe.readline().strip())
            open(PID_FILE, "w").write("%d"%pid)
            os.wait() # Wait child
コード例 #14
0
ファイル: setup.py プロジェクト: pombreda/Cinnamon-Installer
def get_home():
    """Return the invoking user's home directory.

    When running under sudo or pkexec, resolve the *original* user's home
    (from SUDO_USER / PKEXEC_UID) rather than root's.
    """
    env = os.environ
    if "SUDO_USER" in env:
        user = env["SUDO_USER"]
    elif "PKEXEC_UID" in env:
        user = pwd.getpwuid(int(env["PKEXEC_UID"])).pw_name
    else:
        user = pwd.getpwuid(os.getuid()).pw_name
    return os.path.expanduser("~" + user)
コード例 #15
0
ファイル: worker.py プロジェクト: HasClass0/ajenti
    def demote(self, uid):
        """Drop this worker's privileges to `uid` and its primary group.

        No-op when the process already runs as `uid`; logs a warning and
        returns when not running as root (setuid would fail).
        """
        try:
            pw = pwd.getpwuid(uid)
            username = pw.pw_name
            gid = pw.pw_gid
        except KeyError:
            # No passwd entry for this uid: fall back to gid == uid.
            username = None
            gid = uid

        if os.getuid() == uid:
            return
        if os.getuid() != 0:
            logging.warn('Running as a limited user, setuid() unavailable!')
            return

        logging.info(
            'Worker %s is demoting to UID %s / GID %s...',
            os.getpid(),
            uid,
            gid
        )

        # Supplementary groups: every group listing the user, plus primary.
        groups = [
            g.gr_gid
            for g in grp.getgrall()
            if username in g.gr_mem or g.gr_gid == gid
        ]
        os.setgroups(groups)
        os.setgid(gid)
        os.setuid(uid)
        logging.info(
            '...done, new EUID %s EGID %s',
            os.geteuid(),
            os.getegid()
        )
コード例 #16
0
 def test_008_file_copy(self):
     """test clush (file copy)"""
     # Copy a temp file with unique content to this host via `clush -c`
     # and verify the content round-trips; also exercises the --dest,
     # --user and --rcopy options.
     content = "%f" % time.time()
     f = make_temp_file(content)
     self._clush_t(["-w", HOSTNAME, "-c", f.name], None, "")
     f.seek(0)
     self.assertEqual(f.read(), content)
     # test --dest option
     f2 = tempfile.NamedTemporaryFile()
     self._clush_t(["-w", HOSTNAME, "-c", f.name, "--dest", f2.name], \
         None, "")
     f2.seek(0)
     self.assertEqual(f2.read(), content)
     # test --user option (current user's login name from passwd)
     f2 = tempfile.NamedTemporaryFile()
     self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w", \
         HOSTNAME, "--copy", f.name, "--dest", f2.name], None, "")
     f2.seek(0)
     self.assertEqual(f2.read(), content)
     # test --rcopy (reverse copy appends ".<hostname>" to the file name)
     self._clush_t(["--user", pwd.getpwuid(os.getuid())[0], "-w", \
         HOSTNAME, "--rcopy", f.name, "--dest", \
             os.path.dirname(f.name)], None, "")
     f2.seek(0)
     self.assertEqual(open("%s.%s" % (f.name, HOSTNAME)).read(), content)
コード例 #17
0
ファイル: test_memory_cache.py プロジェクト: SSSD/sssd
def test_mc_zero_timeout(ldap_conn, zero_timeout_rfc2307):
    """
    Test that the memory cache is not created at all with memcache_timeout=0
    """
    # No memory cache files must be created
    assert len(os.listdir(config.MCACHE_PATH)) == 0

    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
コード例 #18
0
    def get_nix_acl_data(cls, file_path, stat_info=None):
        """
        Get the Unix ACL data for the given path.

        Returns an OrderedDict with owner name, group id and the octal
        permission mask, or None on non-Unix platforms.  Accepts a
        pre-fetched os.stat result to avoid a second syscall.
        """

        # Stop if this isn't Unix
        if not nix_import_available:
            return

        output = collections.OrderedDict()

        if stat_info is None:
            stat_info = os.stat(file_path)

        # Get the owner
        # NOTE(review): both keys hold the owner *name*; 'owner_uid' looks
        # like it was meant to be stat_info.st_uid — confirm before changing.
        output['owner_uid'] = pwd.getpwuid(stat_info.st_uid)[0]
        output['owner'] = pwd.getpwuid(stat_info.st_uid)[0]

        # Get the group
        output['group_uid'] = stat_info.st_gid
        # This is disabled because grp isn't available on Splunk's python
        # output['group'] = grp.getgrgid(stat_info.st_gid)[0]

        # Get the permissions (Python 2 octal literal: 0777)
        output['permission_mask'] = oct(stat_info.st_mode & 0777)

        return output
コード例 #19
0
ファイル: util.py プロジェクト: pol51/python-sipsimple
 def fullname(self):
     # Best-effort human-readable name for the current user, as unicode.
     if platform.system() == 'Windows':
         name = os.getenv('USERNAME')
     else:
         import pwd
         # First GECOS field (conventionally the full name), falling back
         # to the login name when it is empty.
         name = pwd.getpwuid(os.getuid()).pw_gecos.split(',', 1)[0] or pwd.getpwuid(os.getuid()).pw_name
     # Python 2: decode the byte string using the filesystem encoding.
     return name.decode(sys.getfilesystemencoding())
コード例 #20
0
def test_request_and_spawn(capfd, request_and_spawn):
    # First spawn: the daemon's stderr must identify this user:pid and
    # report that the work completed.
    request_and_spawn()

    captured = capfd.readouterr()
    print('**************\n%s\n**************' % captured.err)

    if request_and_spawn.kind != 'running':
        assert '%s:%s' % (pwd.getpwuid(os.getuid())[0], os.getpid()) in captured.err
        assert 'completed. Passing back results to' in captured.err
        assert 'Queues => 0 workspaces' in captured.err

    # Fire several non-waiting requests to provoke concurrent spawns.
    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn(wait=False)
    request_and_spawn()

    # wait for process list to settle (eg: there might be one or two extra processes that will exit because the lock
    # is already acquired - see StampedeStub)
    start = time.time()
    while len(get_children()) > 1 and time.time() - start < TIMEOUT:
        time.sleep(0.1)

    # Exactly one daemon child must survive; kill it to clean up.
    children = get_children()
    assert len(children) == 1
    for child in children:
        child.kill()

    captured = capfd.readouterr()
    print('##############\n%s\n##############' % captured.err)
    if request_and_spawn.kind != 'running':
        assert '%s:%s' % (pwd.getpwuid(os.getuid())[0], os.getpid()) in captured.err
        assert 'completed. Passing back results to' in captured.err
        assert 'Queues => 0 workspaces' in captured.err
コード例 #21
0
ファイル: pymaidentity.py プロジェクト: pudquick/pymaIdentity
 def __init__(self, name_or_uid = None):
     # If passed a string, assume user name
     # If passed a number, assume uid
     # If None, leave everything with a value of None
     # (Python 2 code: uses `long` and `basestring`.)
     
     # Initialize everything to None
     for i in self._fields:
         setattr(self, i, None)
     
     # Determine whether we were passed a name or a uid or a User
     if isinstance(name_or_uid, User):
         # Guessing it's a User object - clone the settings
         # Clone if user name or uid present, otherwise None
         if name_or_uid != None:
             if name_or_uid.name is not None:
                 pw_info = pwd.getpwnam(name_or_uid.name)
             else:
                 pw_info = pwd.getpwuid(name_or_uid.uid)
             self._init_with_pwd(pw_info)
     elif isinstance(name_or_uid, (int,long)):
         # Guessing it's a uid
         try:
             pw_info = pwd.getpwuid(name_or_uid)
             self._init_with_pwd(pw_info)
         except KeyError:
             # Unknown uid: leave all fields as None
             self.uid = None
     elif isinstance(name_or_uid, basestring):
         # Guessing it's a user name
         try:
             pw_info = pwd.getpwnam(name_or_uid)
             self._init_with_pwd(pw_info)
         except KeyError:
             # Unknown user name: leave all fields as None
             self.name = None
コード例 #22
0
def set_user(uid, assign_all_groups):
    """Drop the current process's privileges to `uid`.

    The user's primary group is set first so files created afterwards
    inherit the correct group ownership; when `assign_all_groups` is true,
    the process also joins every supplementary group listing the user.
    Exits the program if it is still running as (e)uid 0 afterwards.
    """
    try:
        # Get user's default group and set it to current process to make sure
        # file permissions are inherited correctly
        # Solves issue with permission denied for JSON files
        primary_gid = pwd.getpwuid(uid).pw_gid
        import grp
        os.setgid(primary_gid)
        if assign_all_groups:
            # Join every group that lists this user as a member.
            login = pwd.getpwuid(uid).pw_name
            supplementary = [g.gr_gid for g in grp.getgrall() if login in g.gr_mem]
            os.setgroups(supplementary)
        os.setuid(uid)

    except OSError as e:
        if e.errno == errno.EPERM:
            sys.stderr.write("error: setuid(%d) failed: permission denied. Did you setup 'sudo' correctly for this script?\n" % uid)
            exit(1)
        else:
            pass

    if os.getuid() == 0:
        sys.stderr.write("error: UID is 0 (root) after changing user. This script should not be run as root. aborting.\n")
        exit(1)

    if os.geteuid() == 0:
        sys.stderr.write("error: EUID is 0 (root) after changing user. This script should not be run as root. aborting.\n")
        exit(1)
コード例 #23
0
ファイル: paraLite.py プロジェクト: PayasR/paralite
 def init_default_configs(self):
     """Build the default configuration dict: temp dir, log dir, block size.

     Directories are placed under the current user's home (resolved via
     the passwd database); the default block size is 0.
     """
     conf = config.config()
     login = pwd.getpwuid(os.getuid())[0]
     defaults = {
         conf.TEMP_DIR: "/home/%s/.paralite-tmp" % login,
         conf.LOG_DIR: "/home/%s/.paralite-log" % login,
         conf.BLOCK_SIZE: 0,
     }
     return defaults
コード例 #24
0
ファイル: daemon.py プロジェクト: bkeep/ganglia_contrib
def drop_privileges(uid_name="nobody", gid_name="nogroup"):
    # Drop root privileges to `uid_name`.  Python 2 code (octal literal 077).
    # NOTE(review): gid_name is accepted but every setgid call is commented
    # out, so the process keeps its original group.
    import pwd, grp

    starting_uid = os.getuid()
    starting_gid = os.getgid()

    starting_uid_name = pwd.getpwuid(starting_uid)[0]

    if os.getuid() != 0:
        # We're not root so, like, whatever dude
        return

    if starting_uid == 0:

        # Get the uid/gid from the name
        running_uid = pwd.getpwnam(uid_name)[2]
        # running_gid = grp.getgrnam(gid_name)[2]

        # Try setting the new uid/gid
        # os.setgid(running_gid)
        os.setuid(running_uid)

        # Restrictive umask so files created later aren't group/world accessible.
        new_umask = 077
        old_umask = os.umask(new_umask)
        sys.stderr.write("drop_privileges: Old umask: %s, new umask: %s\n" % (oct(old_umask), oct(new_umask)))

    final_uid = os.getuid()
    final_gid = os.getgid()
    sys.stderr.write("drop_privileges: running as %s/%s\n" % (pwd.getpwuid(final_uid)[0], grp.getgrgid(final_gid)[0]))
コード例 #25
0
ファイル: test_enumeration.py プロジェクト: jhrozek/sssd
def test_sanity_rfc2307_bis(ldap_conn, sanity_rfc2307_bis):
    # Enumerate users and groups from the rfc2307bis LDAP fixture and check
    # the exact expected contents, including flattening of nested groups.
    passwd_pattern = ent.contains_only(
        dict(name="user1", passwd="*", uid=1001, gid=2001, gecos="1001", dir="/home/user1", shell="/bin/bash"),
        dict(name="user2", passwd="*", uid=1002, gid=2002, gecos="1002", dir="/home/user2", shell="/bin/bash"),
        dict(name="user3", passwd="*", uid=1003, gid=2003, gecos="1003", dir="/home/user3", shell="/bin/bash"),
    )
    ent.assert_passwd(passwd_pattern)

    # Groups that only contain other groups must appear with their members
    # resolved (empty when the nested groups are empty).
    group_pattern = ent.contains_only(
        dict(name="group1", passwd="*", gid=2001, mem=ent.contains_only()),
        dict(name="group2", passwd="*", gid=2002, mem=ent.contains_only()),
        dict(name="group3", passwd="*", gid=2003, mem=ent.contains_only()),
        dict(name="empty_group1", passwd="*", gid=2010, mem=ent.contains_only()),
        dict(name="empty_group2", passwd="*", gid=2011, mem=ent.contains_only()),
        dict(name="two_user_group", passwd="*", gid=2012, mem=ent.contains_only("user1", "user2")),
        dict(name="group_empty_group", passwd="*", gid=2013, mem=ent.contains_only()),
        dict(name="group_two_empty_groups", passwd="*", gid=2014, mem=ent.contains_only()),
        dict(name="one_user_group1", passwd="*", gid=2015, mem=ent.contains_only("user1")),
        dict(name="one_user_group2", passwd="*", gid=2016, mem=ent.contains_only("user2")),
        dict(name="group_one_user_group", passwd="*", gid=2017, mem=ent.contains_only("user1")),
        dict(name="group_two_user_group", passwd="*", gid=2018, mem=ent.contains_only("user1", "user2")),
        dict(name="group_two_one_user_groups", passwd="*", gid=2019, mem=ent.contains_only("user1", "user2")),
    )
    ent.assert_group(group_pattern)

    # Negative lookups must miss cleanly.
    with pytest.raises(KeyError):
        pwd.getpwnam("non_existent_user")
    with pytest.raises(KeyError):
        pwd.getpwuid(1)
    with pytest.raises(KeyError):
        grp.getgrnam("non_existent_group")
    with pytest.raises(KeyError):
        grp.getgrgid(1)
コード例 #26
0
ファイル: sockapi.py プロジェクト: chenglch/confluent
def _unixdomainhandler():
    # Serve the local management API over a Unix domain socket, using the
    # kernel-verified SO_PEERCRED credentials of each connecting process
    # to decide whether to skip authentication.
    unixsocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        os.remove("/var/run/confluent/api.sock")
    except OSError:  # if file does not exist, no big deal
        pass
    unixsocket.bind("/var/run/confluent/api.sock")
    # World read/write on the socket file: access control happens
    # per-connection below, not via file permissions.
    os.chmod("/var/run/confluent/api.sock",
             stat.S_IWOTH | stat.S_IROTH | stat.S_IWGRP |
             stat.S_IRGRP | stat.S_IWUSR | stat.S_IRUSR)
    atexit.register(removesocket)
    unixsocket.listen(5)
    while True:
        cnn, addr = unixsocket.accept()
        # Peer credentials from the kernel: (pid, uid, gid).
        creds = cnn.getsockopt(socket.SOL_SOCKET, SO_PEERCRED,
                               struct.calcsize('iII'))
        pid, uid, gid = struct.unpack('iII', creds)
        skipauth = False
        if uid in (os.getuid(), 0):
            #this is where we happily accept the person
            #to do whatever.  This allows the server to
            #start with no configuration whatsoever
            #and yet still be configurable by some means
            skipauth = True
            try:
                authname = pwd.getpwuid(uid).pw_name
            except:
                authname = "UNKNOWN SUPERUSER"
        else:
            try:
                authname = pwd.getpwuid(uid).pw_name
            except KeyError:
                # Peer uid has no passwd entry: reject the connection.
                cnn.close()
                return
        eventlet.spawn_n(sessionhdl, cnn, authname, skipauth)
コード例 #27
0
ファイル: root_fork.py プロジェクト: 4sp1r3/oschameleon
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
    # Fork: the child keeps root to run root_process(), while the parent
    # drops to uid_name/gid_name and idles until interrupted.
    # Python 2 code (print statements); uses gevent for fork/spawn/sleep.
    print("Init: Running as {0}/{1}.".format(pwd.getpwuid(os.getuid())[0], grp.getgrgid(os.getgid())[0]))
    wanted_uid = pwd.getpwnam(uid_name)[2]
    wanted_gid = grp.getgrnam(gid_name)[2]

    pid = gevent.fork()
    if pid == 0:
        # child
        print 'starting child process'
        child_process = gevent.spawn(root_process)
        child_process.join()
        print 'Child done:', child_process.successful()
        oschameleon.osfuscation.flush_tables()
        print 'Child exit'
    else:
        # parent
        # Drop group before user: setuid is irreversible once it succeeds.
        os.setgid(wanted_gid)
        os.setuid(wanted_uid)
        new_uid_name = pwd.getpwuid(os.getuid())[0]
        new_gid_name = grp.getgrgid(os.getgid())[0]
        print("Parent: Privileges dropped, running as {0}/{1}.".format(new_uid_name, new_gid_name))
        while True:
            try:
                gevent.sleep(1)
                print 'Parent: ping'
            except KeyboardInterrupt:
                break
コード例 #28
0
ファイル: dockutil.py プロジェクト: wavesaudio/instl
def valid_uid(uid):
    """returns bool of whether uid can be resolved to a user"""
    try:
        pwd.getpwuid(uid)
    except Exception:
        # No passwd entry (or an unrepresentable uid) -> not a valid user.
        return False
    return True
コード例 #29
0
ファイル: test_env.py プロジェクト: wyatt88/pcs
    def test_set_desired_file_access(self):
        # env.set_keyfile_access must chmod the keyfile to 0600 and chown it
        # to the configured pacemaker user/group.
        #setup
        file_path = rc("temp-keyfile")
        if os.path.exists(file_path):
            os.remove(file_path)
        with open(file_path, "w") as file:
            file.write("content")

        #check assumptions: the fresh file must NOT already have the desired
        # mode/ownership, otherwise the test would pass vacuously.
        stat = os.stat(file_path)
        self.assertNotEqual('600', oct(stat.st_mode)[-3:])
        current_user = pwd.getpwuid(os.getuid())[0]
        if current_user != settings.pacemaker_uname:
            file_user = pwd.getpwuid(stat.st_uid)[0]
            self.assertNotEqual(file_user, settings.pacemaker_uname)
        current_group = grp.getgrgid(os.getgid())[0]
        if current_group != settings.pacemaker_gname:
            file_group = grp.getgrgid(stat.st_gid)[0]
            self.assertNotEqual(file_group, settings.pacemaker_gname)

        #run tested method
        env.set_keyfile_access(file_path)

        #check
        stat = os.stat(file_path)
        self.assertEqual('600', oct(stat.st_mode)[-3:])

        file_user = pwd.getpwuid(stat.st_uid)[0]
        self.assertEqual(file_user, settings.pacemaker_uname)

        file_group = grp.getgrgid(stat.st_gid)[0]
        self.assertEqual(file_group, settings.pacemaker_gname)
コード例 #30
0
ファイル: nxFile.py プロジェクト: 40a/WPSDSCLinux
def TestOwnerGroupMode(DestinationPath, SourcePath, fc):
    """Return True when DestinationPath's owner, group and mode match `fc`.

    Fields left unset on `fc` (Owner/Group/Mode) fall back to comparing
    against SourcePath when one is given, and are skipped otherwise.
    Mode is not checked for symlinks.
    """
    stat_info = os.lstat(DestinationPath)

    if SourcePath:
        stat_info_src = os.lstat(SourcePath)

    if fc.Owner:
        # Explicit owner requested: compare resolved uids.
        if pwd.getpwnam(fc.Owner)[2] != pwd.getpwuid(stat_info.st_uid)[2]:
            return False
    elif SourcePath:
        # No owner specified: destination must match the source's owner.
        if pwd.getpwuid(stat_info.st_uid)[2] != pwd.getpwuid(stat_info_src.st_uid)[2]:
            return False

    if fc.Group:
        # Explicit group requested: compare resolved gids.
        if grp.getgrnam(fc.Group)[2] != grp.getgrgid(stat_info.st_gid)[2]:
            return False
    elif SourcePath:
        # No group specified: destination must match the source's group.
        if grp.getgrgid(stat_info.st_gid)[2] != grp.getgrgid(stat_info_src.st_gid)[2]:
            return False

    # Mode is irrelevant to symlinks
    if not os.path.islink(DestinationPath):
        dest_mode = str(oct(stat_info.st_mode))[-3:]
        if fc.Mode:
            if dest_mode != fc.Mode:
                return False
        elif SourcePath:
            if dest_mode != str(oct(stat_info_src.st_mode))[-3:]:
                return False

    return True
コード例 #31
0
    if S_ISLNK(mode):
        d['lnk_source'] = os.path.realpath(path)

    if S_ISREG(mode) and get_md5 and os.access(path, os.R_OK):
        # Will fail on FIPS-140 compliant systems
        try:
            d['md5'] = module.md5(path)
        except ValueError:
            d['md5'] = None

    if S_ISREG(mode) and get_checksum and os.access(path, os.R_OK):
        d['checksum'] = module.sha1(path)

    try:
        pw = pwd.getpwuid(st.st_uid)

        d['pw_name'] = pw.pw_name

        grp_info = grp.getgrgid(st.st_gid)
        d['gr_name'] = grp_info.gr_name
    except:
        pass

    module.exit_json(changed=False, stat=d)


# import module snippets
# Ansible boilerplate: the star-import pulls AnsibleModule and its helper
# names into this module's namespace for use by main() (defined earlier).
from ansible.module_utils.basic import *

# Module entry point: Ansible executes this file as a script.
main()
コード例 #32
0
ファイル: unarchive.py プロジェクト: lightissa/global
    def is_unarchived(self):
        # BSD unzip doesn't support zipinfo listings with timestamp.
        cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
        if self.excludes:
            cmd.extend([
                '-x',
            ] + self.excludes)
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)
        systemtype = platform.system()

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except:
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except:
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                tpw = pwd.getpwname(self.file_args['owner'])
            except:
                try:
                    tpw = pwd.getpwuid(self.file_args['owner'])
                except:
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            try:
                fut_owner = run_owner
            except:
                pass
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except:
                try:
                    tgr = grp.getgrgid(self.file_args['group'])
                except:
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            try:
                fut_group = run_group
            except:
                pass
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            pcs = line.split(None, 7)
            if len(pcs) != 8:
                # Too few fields... probably a piece of the header or footer
                continue

            # Check first and seventh field in order to skip header/footer
            if len(pcs[0]) != 7 and len(pcs[0]) != 10:
                continue
            if len(pcs[6]) != 15:
                continue

            # Possible entries:
            #   -rw-rws---  1.9 unx    2802 t- defX 11-Aug-91 13:48 perms.2660
            #   -rw-a--     1.0 hpf    5358 Tl i4:3  4-Dec-91 11:33 longfilename.hpfs
            #   -r--ahs     1.1 fat    4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
            #   --w-------  1.0 mac   17357 bx i8:2  4-May-92 04:02 unzip.macr
            if pcs[0][0] not in 'dl-?' or not frozenset(
                    pcs[0][1:]).issubset('rwxstah-'):
                continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = to_text(pcs[7], errors='surrogate_or_strict')

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (
                        path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'

            # Some files may be storing FAT permissions, not Unix permissions
            # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set.  Otherwise, 666.
            #     This permission will then be modified by the system UMask.
            # BSD always applies the Umask, even to Unix permissions.
            # For Unix style permissions on Linux or Mac, we want to use them directly.
            #     So we set the UMask for this file to zero.  That permission set will then be unchanged when calling _permstr_to_octal

            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'
                file_umask = umask
            elif 'bsd' in systemtype.lower():
                file_umask = umask
            else:
                file_umask = 0

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' %
                                     permstr)

            # DEBUG
#            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            # Note: this timestamp calculation has a rounding error
            # somewhere... unzip and this timestamp can be one second off
            # When that happens, we report a change and re-unzip the file
            dt_object = datetime.datetime(
                *(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        self.excludes.append(path)
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (
                            path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size,
                                                                 st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                if crc != self._crc32(path):
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (
                        path, self._crc32(path), crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':

                # Use the new mode provided with the action, if there is one
                if self.file_args['mode']:
                    if isinstance(self.file_args['mode'], int):
                        mode = self.file_args['mode']
                    else:
                        try:
                            mode = int(self.file_args['mode'], 8)
                        except Exception as e:
                            try:
                                mode = AnsibleModule._symbolic_mode_to_octal(
                                    st, self.file_args['mode'])
                            except ValueError as e:
                                self.module.fail_json(
                                    path=path,
                                    msg="%s" % to_native(e),
                                    exception=traceback.format_exc())
                # Only special files require no umask-handling
                elif ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, file_umask)

                if mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (
                        path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError(
                    'Cannot change ownership of %s to %s, as user %s' %
                    (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (
                    path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (
                    path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError(
                    'Cannot change group ownership of %s to %s, as user %s' %
                    (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (
                    path, group, fut_group)
                itemized[6] = 'g'
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (
                    path, gid, fut_gid)
                itemized[6] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG


#        out = old_out + out

        return dict(unarchived=unarchived,
                    rc=rc,
                    out=out,
                    err=err,
                    cmd=cmd,
                    diff=diff)
コード例 #33
0
def get_username():
    """Return the login name of the user owning the current process."""
    import pwd
    uid = os.getuid()
    return pwd.getpwuid(uid).pw_name
コード例 #34
0
def main(argv):
    # Parse arguments. User must input a job name.
    parser = argparse.ArgumentParser(description='Main program to start or restart ' + \
             'sensitivity analysis for WRF-Hydro')
    parser.add_argument('jobID',metavar='jobID',type=str,nargs='+',
                        help='Job ID specific to your sensitivity/caliration workflow job.')
    parser.add_argument('--optDbPath',type=str,nargs='?',
                        help='Optional alternative path to SQLite DB file.')
    
    args = parser.parse_args()
    
    # If the SQLite file does not exist, throw an error.
    if args.optDbPath is not None:
        if not os.path.isfile(args.optDbPath):
            print "ERROR: " + args.optDbPath + " Does Not Exist."
            sys.exit(1)
        else:
            dbPath = args.optDbPath
    else:
        dbPath = topDir + "wrfHydroCalib.db"
        if not os.path.isfile(dbPath):
            print "ERROR: SQLite3 DB file: " + dbPath + " Does Not Exist."
            sys.exit(1)
    
    # Establish the beginning timestamp for this program.
    begTimeStamp = datetime.datetime.now()
    
    # Get current user who is running this program.
    userTmp = pwd.getpwuid(os.getuid()).pw_name
    
    # Initialize object to hold status and job information
    jobData = statusMod.statusMeta()
    jobData.jobID = int(args.jobID[0])
    jobData.dbPath = dbPath
    
    # Establish database connection.
    db = dbMod.Database(jobData)
    try:
        db.connect(jobData)
    except:
        print jobData.errMsg
        sys.exit(1)
        
    # Extract job data from database
    try:
        db.jobStatus(jobData)
    except:
        print jobData.errMsg
        sys.exit(1)
        
    # If the sensitivity flag is 0, simply exit gracefully as the user specified
    # not to run calibration.
    if jobData.sensFlag != 1:
        print "ERROR: Sensitivity flag was set to 0 for this workflow."
        sys.exit(1)
        
    # Establish LOCK file to secure this Python program to make sure
    # no other instances over-step here. This is mostly designed to deal
    # with nohup processes being kicked off Yellowstone/Cheyenne/Crontabs arbitrarily.
    # Just another check/balance here.
    lockPath = str(jobData.jobDir) + "/PYTHON.LOCK"
    if os.path.isfile(lockPath):
        # Either a job is still running, or was running
        # and was killed.

        print 'LOCK FILE FOUND.'
        # Read in to get PID number
        pidObj = pd.read_csv(lockPath)
        pidCheck = int(pidObj.PID[0])
        if errMod.check_pid(pidCheck):
                print "JOB: " + str(pidCheck) + \
                      " Is still running."
                sys.exit(0)
        else:
                print "JOB: " + str(pidCheck) + \
                      " Has Failed. Removing LOCK " + \
                      " file."
                os.remove(lockPath)
                fileObj = open(lockPath,'w')
                fileObj.write('\"PID\"\n')
                fileObj.write(str(os.getpid()))
                fileObj.close()
        # TEMPORARY FOR CHEYENNE. Since all cron jobs are launched
        # from an administrative node, we cannot monitor the process at 
        # all, which is an inconvenience. So.... we will check the last
        # modified time. If it's more than 30 minutes old, we will assume
        # the process is no longer running and can continue on with calibration.
        #dtRunCheck = datetime.datetime.now() - datetime.datetime.fromtimestamp(os.path.getmtime(lockPath))
        #if dtRunCheck.seconds/60.0 < 15.0:
        #    # We are going to assume a previous process is still running on the system. 
        #    # exit gracefully.
        #    print 'ASSUMING PROCESS STILL RUNNING'
        #    sys.exit(0)
        #else:
        #    # We are assuming the process is no longer running on the system. Alow
        #    # the workflow to continue. 
        #    print 'ALLOWING WORKFLOW TO CONINUE. REMOVING LOCK FILE'
        #    os.remove(lockPath)
        #    fileObj = open(lockPath,'w')
        #    fileObj.write('\"PID\"\n')
        #    fileObj.write(str(os.getpid()))
        #    fileObj.close()
    else:
        # Write a LOCK file for this program.
        fileObj = open(lockPath,'w')
        fileObj.write('\"PID\"\n')
        fileObj.write(str(os.getpid()))
        fileObj.close()
        
    # Pull extensive meta-data describing the job from the config file.
    configPath = str(jobData.jobDir) + "/setup.config"
    if not os.path.isfile(configPath):
        print "ERROR: Configuration file: " + configPath + " not found."
        sys.exit(1)
    try:        
        staticData = configMod.readConfig(configPath)
    except:
        print "ERROR: Failure to read configuration file: " + configPath
        sys.exit(1)
        
    # Assign the SQL command from the config file into the jobData structure
    jobData.gSQL = staticData.gSQL
    
    # Check gages in directory to match what's in the database
    try:
        jobData.checkGages2(db)
    except:
        errMod.errOut(jobData)
        
    # Some house keeping here. If the sensitivity is already complete, throw an error. 
    # Also ensure the spinup has been entered as complete. This is necessary for the 
    # sensitivity to run.
    # also, if this is a re-initiation under a different user, require the new
    # user to enter a new contact that will be unpdated in the database. 
    if int(jobData.spinComplete) != 1:
        # Check to see if optional spinup options were enabled. If so, update the spinup status.
        if staticData.coldStart == 1 or staticData.optSpinFlag != 0:
            print "Found optional spinup alternatives"
            jobData.spinComplete = 1
            try:
                db.updateSpinupStatus(jobData)
            except:
                errMod.errOut(jobData)
        else:
            jobData.errMsg = "ERROR: Spinup for job ID: " + str(jobData.jobID) + \
                             " is NOT complete. You must complete the spinup in order" + \
                             " to run calibration."
            errMod.errOut(jobData)
        
    if int(jobData.sensComplete) == 1:
        jobData.errMsg = "ERROR: Sensitivity for job ID: " + str(jobData.jobID) + \
                         " has already completed."
        errMod.errOut(jobData)
        
    if userTmp != jobData.owner:
        print "User: "******" is requesting to takeover jobID: " + \
              str(jobData.jobID) + " from owner: " + str(jobData.owner)
        strTmp = "Please enter new email address. Leave blank if no email " + \
                 "change is desired. NOTE if you leave both email and Slack " + \
                 "information blank, no change in contact will occur. Only " + \
                 "the owner will be modified:"
        newEmail = raw_input(strTmp)
        #strTmp = "Please enter Slack channel:"
        #newSlackChannel = raw_input(strTmp)
        #strTmp = "Please enter Slack token:"
        #newSlackToken = raw_input(strTmp)
        #strTmp = "Please enter Slack user name:"
        #newSlackUName = raw_input(strTmp)
        changeFlag = 1
        #if len(newSlackChannel) != 0 and len(newSlackToken) == 0:
        #    print "ERROR: You must specify an associated Slacker API token."
        #    sys.exit(1)
        #if len(newSlackChannel) != 0 and len(newSlackUName) == 0:
        #    print "ERROR: You must specify an associated Slacker user name."
        #    sys.exit(1)
        #if len(newSlackToken) != 0 and len(newSlackChannel) == 0:
        #    print "ERROR: You must specify an associated Slacker channel name."
        #    sys.exit(1)
        #if len(newSlackToken) != 0 and len(newSlackUName) == 0:
        #    print "ERROR: You must specify an associated Slacker user name."
        #    sys.exit(1)
        #if len(newSlackUName) != 0 and len(newSlackChannel) == 0:
        #    print "ERROR: You must specify an associated Slacker channel name."
        #    sys.exit(1)
        #if len(newSlackUName) != 0 and len(newSlackToken) == 0:
        #    print "ERROR: You must specify an associated Slacker API token."
        #    sys.exit(1)
        #if len(newSlackChannel) != 0 and len(newEmail) != 0:
        #    print "ERROR: You cannot specify both email and Slack for notifications."
        #    sys.exit(1)
        #if len(newSlackChannel) == 0 and len(newEmail) == 0:
        #    changeFlag = 0
            
        # PLACEHOLDER FOR CHECKING SLACK CREDENTIALS
            
        jobData.genMsg = "MSG: User: "******" Is Taking Over JobID: " + str(jobData.jobID) + \
                         " From Owner: " + str(jobData.owner)
        errMod.sendMsg(jobData)
        
        # TEMPORARY FOR VERSION 1.2 NWM CALIBRATION!!!!
        # If a new owner takes over, simply change the owner, but keep all 
        # other contact information the same.
        newEmail = jobData.email
        newSlackChannel = jobData.slChan
        newSlackToken = jobData.slToken
        newSlackUName = jobData.slUser
        if not newEmail:
            newEmail = ''
        if not newSlackChannel:
            newSlackChannel = ''
            newSlackToken = ''
            
        try:
            db.updateJobOwner(jobData,userTmp,newEmail,newSlackChannel,newSlackToken,newSlackUName,changeFlag)
        except:
            errMod.errOut(jobData)
            
    # Begin an "infinite" do loop. This loop will continue to loop through all 
    # the basins until sensitivity jobs are complete. Basins are allowed ONE failure. A restart
    # will be attempted. If the restart fails again, a LOCK file is placed into the
    # run directory and an error email is sent to the user.
    completeStatus = False
    
    # Create a "key" array. This array is of length [numBasins] and is initialized to 0.0.
    # Each array element can have the following values based on current model status:
    # 0.0 - Initial value
    # 0.10 - Job to generate parameter grids for each model job is being ran.
    # 0.25 - Job to generate parameter grids is complete. Ready to run models.....
    # 0.5 - Model simulationa are in progress
    # 0.75 - Job to read in model output and run sensitivity analysis is ready to be ran. 
    # 0.90 - Job to read in model output and run sensitivity analysis is running. 
    # 1.0 - Sensitivity analysis complete
    # -0.1 - Parameter generation failed. A LOCK file has been created. 
    # -0.5 - Model simulation failed once and a restart has been attempted
    # -0.90 - Sensitivity analysis job has failed. A LOCK file has been created. 
    # -1.0 - Model has failed twice. A LOCK file has been created.
    # Once all array elements are 1.0, then completeStatus goes to True, an entry into
    # the database occurs, and the program will complete.
    keySlot = np.empty([len(jobData.gages),int(jobData.nSensIter)])
    keySlot[:,:] = 0.0
    entryValue = float(len(jobData.gages)*int(jobData.nSensIter)*2.0)
    
    # Create an array to hold systme job ID values. This will only be used for
    # PBS as qstat has demonstrated slow behavior when doing a full qstat command. 
    # We will track job ID values and do a qstat <jobID> and populate this array
    # to keep track of things. 
    pbsJobId = np.empty([len(jobData.gages),int(jobData.nSensIter)],np.int64)
    pbsJobId[:,:] = -9999
    pbsCollectId = np.empty([len(jobData.gages),int(jobData.nSensIter)],np.int64)
    pbsCollectId[:,:] = -9999
    pbsPreId = np.empty([len(jobData.gages)],np.int64)
    pbsPreId[:] = -9999
    pbsPostId = np.empty([len(jobData.gages)],np.int64)
    pbsPostId[:] = -9999
    
    # Pull all the status values into the keySlot array. 
    for basin in range(0,len(jobData.gages)):
        domainID = jobData.gageIDs[basin]
            
        if domainID == -9999:
            jobData.errMsg = "ERROR: Unable to locate domainID for gage: " + str(jobData.gages[basin])
            errMod.errOut(jobData)
            
        # We are going to pull all values for one basin, then place them into the array.
        # This is faster then looping over each iteration at a time. 
        statusData = db.sensIterationStatus(jobData,domainID,str(jobData.gages[basin]))
        statusData = [list(item) for item in statusData]
        for iteration in range(0,int(jobData.nSensIter)):
            for iteration2 in range(0,int(jobData.nSensIter)):
                if statusData[iteration2][0] == iteration+1:
                    keySlot[basin,iteration] = float(statusData[iteration2][1])
            
    if len(np.where(keySlot != 0.0)[0]) == 0:
        # We need to either check to see if pre-processing has taken place, or
        # run it.
        preProcStatus = False
        
    while not completeStatus:
        # Walk through each basin undergoing sensitivity analysis. 
        for basin in range(0,len(jobData.gages)):
            print "GAGE: " + jobData.gages[basin]
            # Establish a status value for pre-processing the parameter values from R/Python code. 
            preProcStatus = False 
    
            # Establish a status value for post-processing the model output and running sensitivity
            # analysis.
            postProcStatus = False
            
            # Calculate the number of "batches" we are going to run
            nBatches = int(jobData.nSensIter/jobData.nSensBatch)
            entryValueBatch = float(jobData.nSensBatch)
            
            # If we have a pre-processing complete file, set our pre-proc status to True. 
            # Also, log parameter values generated if the log file hasn't been created. 
            preProcComplete = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/preProc.COMPLETE"
            parmsLogged =  jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/PARAMS_LOGGED.COMPLETE"
            parmTxtFile = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/params_new.txt"
            sensLogged = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/SENS_LOGGED.COMPLETE"
            sensStats = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/stat_sensitivity.txt"
            missingFlag = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/CALC_STATS_MISSING"
            if os.path.isfile(preProcComplete):
                preProcStatus = True
                print "PRE PROCESSING COMPLETE!"
                if not os.path.isfile(parmsLogged):
                    # Log parameter values generated by pre-processing.
                    print "LOGGING PRE-PROC PARAM FILES."
                    try:
                        db.insertSensParms(jobData,parmsLogged,parmTxtFile,jobData.gageIDs[basin])
                    except:
                        jobData.errMsg = ("WARNING: Unable to log sensitivity parameters for basin: " + str(basin) + \
                                          " Job: " + str(jobData.jobID))
                        errMod.errOut(jobData)
            if not preProcStatus:
                try:
                    sensitivityMod.preProc(preProcStatus,jobData,staticData,db,jobData.gageIDs[basin],jobData.gages[basin],pbsPreId,basin)
                except:
                    errMod.errOut(jobData)
            else:
                # The goal here is to only operate on a fixed number of model runs at a time.
                # If you have a large parameter sample size, it's possible to have hundreds,
                # if not thousands of model permuatations. This worflow allows for 
                # only batches of model runs to be ran at a time as to not bog down the system. 
                for batchIter in range(0,nBatches):
                    time.sleep(30)
                    batchCheck = keySlot[basin,(batchIter*jobData.nSensBatch):((batchIter+1)*jobData.nSensBatch)]
                    if batchIter == 0:
                        batchCheckPrev = entryValueBatch
                    else:
                        batchCheckPrev = keySlot[basin,((batchIter-1)*jobData.nSensBatch):(batchIter*jobData.nSensBatch)]
                        batchCheckPrev = batchCheckPrev.sum()
                    if batchCheck.sum() != entryValueBatch and batchCheckPrev == entryValueBatch:
                        for iterTmp in range(0,jobData.nSensBatch):
                            iteration = batchIter*jobData.nSensBatch + iterTmp
                            keyCheck1 = keySlot[basin,iteration]
                            if keyCheck1 < 1:
                                # This model iteration has not completed. 
                                try:
                                    sensitivityMod.runModel(jobData,staticData,db,jobData.gageIDs[basin],jobData.gages[basin],keySlot,basin,iteration,pbsJobId,pbsCollectId)
                                except:
                                    errMod.errOut(jobData)
                                
                                if keySlot[basin,iteration] == 0.0 and keyCheck1 == 0.5:
                                    # Put some spacing between launching model simulations to slow down que geting 
                                    # overloaded.
                                    time.sleep(3)
                                    
                                # Update the temporary status array as it will be checked for this batch of model runs.
                                batchCheck[iterTmp] = keySlot[basin,iteration]
                                
            # Run post-processing ONLY when all model simulations are finished.
            if not postProcStatus and preProcStatus and len(np.where(batchCheck != 1.0)[0]) == 0:
                print "READY FOR POST PROCESSING"
                try:
                    sensitivityMod.postProc(postProcStatus,jobData,staticData,db,jobData.gageIDs[basin],jobData.gages[basin],pbsPostId,basin)
                except:
                    errMod.errOut(jobData)
                                
            postProcComplete = jobData.jobDir + "/" + jobData.gages[basin] + "/RUN.SENSITIVITY/postProc.COMPLETE"
            if os.path.isfile(postProcComplete):
                if not os.path.isfile(sensLogged) and not os.path.isfile(missingFlag):
                    # Log sensitivity statistics into the database.
                    if not os.path.isfile(sensStats):
                        jobData.errMsg = "ERROR: Expected to find: " + sensStats + " after post-processing. Not found."
                        errMod.errOut(jobData)
                    else:
                        try:
                            db.logSensStats(jobData,sensStats,jobData.gageIDs[basin],sensLogged)
                        except:
                            errMod.errOut(jobData)
                    # Check for complete flag on logging sensitivity statistics. 
                    if os.path.isfile(sensLogged):
                        postProcStatus = True
                        # Upgrade key status values as necessary
                        for iterTmp in range(0,jobData.nSensIter):
                            keySlot[basin,iterTmp] = 2.0
                elif os.path.isfile(sensLogged):
                    # Post-processing complete and statistics were sucessfully logged.
                    postProcStatus = True
                    # Upgrade key status values as necessary
                    for iterTmp in range(0,jobData.nSensIter):
                        keySlot[basin,iterTmp] = 2.0
                elif os.path.isfile(missingFlag):
                    # Missing obs were found. We will default to making this basin complete.
                    for iterTmp in range(0,jobData.nSensIter):
                        keySlot[basin,iterTmp] = 2.0
                        
            # TEMPORARY FOR CHEYENNE
            # Check to make sure program hasn't passed a prescribed time limit. If it has,
            # exit gracefully.
            #timeCheckStamp = datetime.datetime.now()
            #programDtCheck = timeCheckStamp - begTimeStamp
            #if programDtCheck.seconds/60.0 > 90.0: 
            #    # 90-minutes)
            #    try:
            #        fileObj = open(lockPath,'a')
            #        fileObj.write('WORKFLOW HAS HIT TIME LIMIT - EXITING....\n')
            #        fileObj.close()
            #    except:
            #        jobData.errMsg = "ERROR: Unable to update workflow LOCK file: " + lockPath
            #        errMod.errOut(jobData)
            
        # Check to see if program requirements have been met.
        if keySlot.sum() == entryValue and postProcStatus:
            jobData.sensComplete = 1
            try:
                db.updateSensStatus(jobData)
            except:
                errMod.errOut(jobData)
            jobData.genMsg = "SENSITIVITY FOR JOB ID: " + str(jobData.jobID) + " COMPLETE."
            errMod.sendMsg(jobData)
            completeStatus = True
            
        # Open the Python LOCK file. Write a blank line to the file and close it.
        # This action will simply modify the file modification time while only adding
        # a blank line.
        try:
            fileObj = open(lockPath,'a')
            fileObj.write('\n')
            fileObj.close()
        except:
            jobData.errMsg = "ERROR: Unable to update workflow LOCK file: " + lockPath
            errMod.errOut(jobData)
            
    # Remove LOCK file
    os.remove(lockPath)
コード例 #35
0
 def owner(self):
     """Look up, via the password database, the login name that owns this file."""
     import pwd
     owner_uid = self.stat().st_uid
     return pwd.getpwuid(owner_uid).pw_name
コード例 #36
0
ファイル: CompilerDaemon_mod.py プロジェクト: kalmuthu/DMS
            lock.release()
            handle_client_thread = RemoteJob.RemoteJob(connection,\
                                                       self.get_config()["scheduler_address"],\
                                                       string.atoi(self.get_config()["scheduler_port"]))
            handle_client_thread.start()


if __name__ == "__main__":

    config_file_path = "/etc/dms.cfg"
    # configuration data as a dictionnary
    config_dictionnary = {
        # paths
        "temp_dir": "/tmp/",
        "lock_file_path":
        pwd.getpwnam(pwd.getpwuid(os.getuid())[0])[5] + "/dms.pid",
        # execution context
        "user_privilege": pwd.getpwuid(os.getuid())[0],
        "max_processes": 2,
        "port": str(Protocol.COMPILER_DAEMON_PORT),
        #address
        "scheduler_address": "192.168.0.1",
        "scheduler_port": "50008"
    }

    configuration_keys_match = [
        ("temp_dir", "paths", "temp_dir"),
        ("lock_file_path", "paths", "lock_file_path"),
        ("user_privilege", "exec", "user_privilege"),
        ("max_processes", "exec", "max_processes"), ("port", "exec", "port"),
        ("scheduler_address", "address", "scheduler_address"),
コード例 #37
0
ファイル: identity.py プロジェクト: stefanzier/envoy
 def home(self) -> str:
     """Return ``$HOME``, first seeding the env var from the password
     database (current uid's home directory) when it is not already set."""
     resolved = os.environ.get("HOME", pwd.getpwuid(os.getuid()).pw_dir)
     os.environ["HOME"] = resolved
     return resolved
コード例 #38
0
ファイル: assetstore.py プロジェクト: salamb/girder
 def _getHdfsUser(assetstore):
     """
     If the given assetstore has an effective user specified, this returns
     it. Otherwise returns the current user.
     """
     return assetstore['hdfs'].get('user') or pwd.getpwuid(os.getuid())[0]
コード例 #39
0
        socket.socket(family=socket.AF_UNIX).connect(DOCKER_SOCKET_PATH)
    except socket.error as e:
        if e.errno == errno.EACCES:
            return False
        if e.errno == errno.ENOENT:
            # Docker is not installed
            return False
        raise
    else:
        return True


if_docker_configured = skipUnless(
    docker_accessible(), "User '{}' does not have permission "
    "to access the Docker server socket '{}'".format(
        pwd.getpwuid(os.geteuid()).pw_name, DOCKER_SOCKET_PATH))


def wait_for_unit_state(docker_client, unit_name, expected_activation_states):
    """
    Wait until a unit is in the requested state.

    :param docker_client: A ``DockerClient`` instance.
    :param unicode unit_name: The name of the unit.
    :param expected_activation_states: Activation states to wait for.

    :return: ``Deferred`` that fires when required state has been reached.
    """
    def is_in_states(units):
        for unit in units:
            if unit.name == unit_name:
コード例 #40
0
ファイル: util.py プロジェクト: systamonster/zato
def get_current_user():
    """Return the login name matching this process's real user id."""
    real_uid = getuid()
    return getpwuid(real_uid).pw_name
コード例 #41
0
ファイル: app.py プロジェクト: the-cc-dev/repo2docker
 def _user_name_default(self):
     """
     Default user_name to current running user.
     """
     return pwd.getpwuid(os.getuid()).pw_name
コード例 #42
0
ファイル: cephprocesses.py プロジェクト: jasonvoor/DeepSea
def check(results=False, quiet=False, **kwargs):
    """
    Query the status of running processes for each role.  Return False if any
    fail.  If results flag is set, return a dictionary of the form:
      { 'down': [ process, ... ], 'up': { process: [ pid, ... ], ...} }
    """
    running = True
    res = {'up': {}, 'down': []}

    # Every configured RGW instance is expected to run a radosgw process.
    if 'rgw_configurations' in __pillar__:
        for rgw_config in __pillar__['rgw_configurations']:
            processes[rgw_config] = ['radosgw']

    # pylint: disable=too-many-nested-blocks
    if 'roles' in __pillar__:
        for role in kwargs.get('roles', __pillar__['roles']):
            # Checking running first.
            for running_proc in psutil.process_iter():
                # NOTE about `ps` and psutils.Process():
                # `ps -e` determines process names by examining
                # /proc/PID/stat,status files.  The name derived
                # there is also found in psutil.Process.name.
                # `ps -ef`, according to strace, appears to also reference
                # /proc/PID/cmdline when determining
                # process names.  We have found that some processes (ie.
                # ceph-mgr was noted) will _sometimes_
                # contain a process name in /proc/PIDstat/stat,status that does
                # not match that found in /proc/PID/cmdline.
                # In our ceph-mgr example, the process name was found to be
                # 'exe' (which happens to also be the name a of
                # symlink in /proc/PID that points to the executable) while the
                # cmdline entry contained 'ceph-mgr' etc.
                # As such, we've decided that a check based on executable
                # path is more reliable.
                pdict = running_proc.as_dict(
                    attrs=['pid', 'name', 'exe', 'uids'])
                # BUG FIX: as_dict() substitutes None for attributes that
                # raised AccessDenied, and kernel threads have no executable,
                # so 'exe'/'uids' may be None.  Skip such entries instead of
                # crashing in os.path.basename() / pwd.getpwuid().
                if not pdict['exe'] or pdict['uids'] is None:
                    continue
                pdict_exe = os.path.basename(pdict['exe'])
                pdict_pid = pdict['pid']
                # Convert the numerical UID to name.
                pdict_uid = pwd.getpwuid(pdict['uids'].real).pw_name
                if pdict_exe in processes[role]:
                    # Verify httpd-worker pid belongs to openattic.
                    if (role != 'openattic') or (role == 'openattic'
                                                 and pdict_uid == 'openattic'):
                        if pdict_exe in res['up']:
                            res['up'][pdict_exe] = res['up'][pdict_exe] + [
                                pdict_pid
                            ]
                        else:
                            res['up'][pdict_exe] = [pdict_pid]

            # Any processes for this role that aren't running, mark them down.
            for proc in processes[role]:
                if proc not in res['up']:
                    if not quiet:
                        log.error(
                            "ERROR: process {} for role {} is not running".
                            format(proc, role))
                    running = False
                    res['down'] += [proc]
            # pylint: disable=fixme
            # FIXME: Map osd.ids to processes.pid to improve qualitify of logging
            # currently you can only say how many osds/ if any are down, but not
            # which osd is down exactly.
            if role == 'storage':
                if 'ceph-osd' in res['up']:
                    if len(__salt__['osd.list']()) > len(
                            res['up']['ceph-osd']):
                        if not quiet:
                            log.error("ERROR: At least one OSD is not running")
                        res = {'up': {}, 'down': {'ceph-osd': 'ceph-osd'}}
                        running = False

    return res if results else running
コード例 #43
0
ファイル: processes.py プロジェクト: fengzhongzhu1621/xTool
def uid_to_name(uid):
    """Translate a numeric user id into the matching login name."""
    passwd_entry = pwd.getpwuid(uid)
    return passwd_entry.pw_name
コード例 #44
0
ファイル: plugin.py プロジェクト: gerka/wp-plugin-boilerplate
    for mod_key, module in module_args.items():
        if mod_key not in config['modules']:
            config['modules'][mod_key] = module
    is_update = True
    print("Update existsing plugin:", plugin_name)
except IOError as a:
    # create new
    if plugin_name:
        # create new
        config = {
            'plugin_name': plugin_name,
            'plugin_slug': plugin_slug(plugin_name),
            'wp_plugin_slug': slugify(plugin_name, '-'),
            'plugin_namespace': plugin_classname(plugin_name),
            'plugin_author': pwd.getpwuid(os.getuid()).pw_gecos,
            'plugin_author_uri': '',
            'modules': parse_args(module_argv),
            'this_year': date.today().year
        }
        print("Create new plugin:", plugin_name)

    else:
        print('`wp-plugin-boilerplate.json` does not exist in cwd')
        sys.exit(1)

plug = p.plugin()
plug.configure(config, plugin_dir)
plug.set_override('--force' in sys.argv)
plug.set_update(is_update)
plug.pre_process()
コード例 #45
0
ファイル: gmacpyutil.py プロジェクト: vmiller/imagr
def GetConsoleUser():
  """Returns the login name owning /dev/console (the console user)."""
  console_uid = os.stat('/dev/console').st_uid
  return pwd.getpwuid(console_uid)[0]
コード例 #46
0
ファイル: constants.py プロジェクト: liumorgan/kitty
def get_boss():
    """Return the process-wide boss object stashed on this function by set_boss()."""
    return get_boss.boss


def set_boss(m):
    # Register *m* as the global boss: stash it on get_boss() for Python
    # callers, then hand it to the C layer via set_c_boss().
    get_boss.boss = m
    set_c_boss(m)


def wakeup():
    # Poke the global boss's child monitor so its loop re-examines state.
    get_boss.boss.child_monitor.wakeup()


# Installation root: two directory levels above this module.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
terminfo_dir = os.path.join(base_dir, 'terminfo')
logo_data_file = os.path.join(base_dir, 'logo', 'kitty.rgba')
try:
    # Prefer the login shell recorded in the password database for this euid.
    shell_path = pwd.getpwuid(os.geteuid()).pw_shell or '/bin/sh'
except KeyError:
    print('Failed to read login shell from /etc/passwd for current user, falling back to /bin/sh', file=sys.stderr)
    shell_path = '/bin/sh'


def glfw_path(module):
    # Path of the compiled glfw backend shared object for *module*.
    # NOTE(review): 'base' is not defined in this file's visible scope (only
    # 'base_dir' is) -- confirm where 'base' is bound before relying on this.
    return os.path.join(base, 'glfw-{}.so'.format(module))


is_wayland = False
# Wayland is opt-in: require a Wayland session, the explicit
# KITTY_ENABLE_WAYLAND switch, and a built wayland glfw backend.
if os.environ.get('WAYLAND_DISPLAY') and 'KITTY_ENABLE_WAYLAND' in os.environ and os.path.exists(glfw_path('wayland')):
    is_wayland = True
コード例 #47
0
 def get_username(self):
     """Return the login name that owns ``index.php`` under this docroot."""
     index_file = os.path.join(self.docroot, 'index.php')
     owner_uid = os.stat(index_file).st_uid
     return getpwuid(owner_uid).pw_name
コード例 #48
0
def main():
    """Install/refresh the Ranger usersync configuration: create the
    directory layout, merge default and install properties, ensure the SSL
    keystore, and resolve the unix owner/group for installed files.
    (Python 2 script: octal literals like 0750, `except KeyError, e`.)"""

    populate_global_dict()
    initvariable()
    # Settings loaded into the module-level globalDict by the calls above.
    logFolderName = globalDict['logdir']
    hadoop_conf = globalDict['hadoop_conf']
    pid_dir_path = globalDict['USERSYNC_PID_DIR_PATH']
    unix_user = globalDict['unix_user']

    if pid_dir_path == "":
        pid_dir_path = "/var/run/ranger"

    # "$pwd" (or empty) means: log under the current working directory.
    if logFolderName.lower() == "$pwd" or logFolderName == "":
        logFolderName = os.path.join(os.getcwd(), "logs")
    ugsyncLogFolderName = logFolderName

    dirList = [
        rangerBaseDirName, usersyncBaseDirFullName, confFolderName,
        certFolderName
    ]
    for dir in dirList:
        if (not os.path.isdir(dir)):
            os.makedirs(dir, 0750)

    # Seed the conf folder with the stock site XML and log4j files.
    defFileList = [defaultSiteXMLFileName, log4jFileName]
    for defFile in defFileList:
        fn = join(confDistDirName, defFile)
        if (isfile(fn)):
            shutil.copy(fn, join(confFolderName, defFile))

    #
    # Create JAVA_HOME setting in confFolderName
    #
    java_home_setter_fn = join(confFolderName, 'java_home.sh')
    if isfile(java_home_setter_fn):
        archiveFile(java_home_setter_fn)
    jhf = open(java_home_setter_fn, 'w')
    # NOTE(review): 'str' shadows the builtin within this function.
    str = "export JAVA_HOME=%s\n" % os.environ['JAVA_HOME']
    jhf.write(str)
    jhf.close()
    os.chmod(java_home_setter_fn, 0750)

    if (not os.path.isdir(localConfFolderName)):
        os.symlink(confFolderName, localConfFolderName)

    # Merge order: defaults first, then install-time overrides win.
    defaultProps = getXMLConfigMap(join(confFolderName,
                                        defaultSiteXMLFileName))
    installProps = getPropertiesConfigMap(
        join(installPropDirName, installPropFileName))
    modifiedInstallProps = convertInstallPropsToXML(installProps)

    mergeProps = {}
    mergeProps.update(defaultProps)
    mergeProps.update(modifiedInstallProps)

    localLogFolderName = mergeProps['ranger.usersync.logdir']
    if localLogFolderName.lower() == "$pwd" or localLogFolderName == "":
        localLogFolderName = logFolderName
    if (not os.path.isdir(localLogFolderName)):
        if (localLogFolderName != ugsyncLogFolderName):
            os.symlink(ugsyncLogFolderName, localLogFolderName)

    if (not 'ranger.usersync.keystore.file' in mergeProps):
        mergeProps['ranger.usersync.keystore.file'] = defaultKSFileName

    ksFileName = mergeProps['ranger.usersync.keystore.file']

    # Create the keystore with the default password when it does not exist.
    if (not isfile(ksFileName)):
        mergeProps['ranger.usersync.keystore.password'] = defaultKSPassword
        createJavaKeystoreForSSL(ksFileName, defaultKSPassword)

    if ('ranger.usersync.keystore.password' not in mergeProps):
        mergeProps['ranger.usersync.keystore.password'] = defaultKSPassword

    fn = join(installTemplateDirName, templateFileName)
    outfn = join(confFolderName, outputFileName)

    if (os.path.isdir(logFolderName)):
        logStat = os.stat(logFolderName)
        # NOTE(review): the next two bare attribute accesses have no effect.
        logStat.st_uid
        logStat.st_gid
        ownerName = pwd.getpwuid(logStat.st_uid).pw_name
        # NOTE(review): group name is derived via pwd.getpwuid(st_uid), not
        # grp.getgrgid(st_gid) -- confirm this is intended.
        groupName = pwd.getpwuid(logStat.st_uid).pw_name
    else:
        os.makedirs(logFolderName, logFolderPermMode)

    if (not os.path.isdir(ugsyncLogFolderName)):
        os.makedirs(ugsyncLogFolderName, logFolderPermMode)

    if (not os.path.isdir(pid_dir_path)):
        os.makedirs(pid_dir_path, logFolderPermMode)

    # Configured unix user/group override the values derived above;
    # default both to "ranger" when unset.
    if (unixUserProp in mergeProps):
        ownerName = mergeProps[unixUserProp]
    else:
        mergeProps[unixUserProp] = "ranger"
        ownerName = mergeProps[unixUserProp]

    if (unixGroupProp in mergeProps):
        groupName = mergeProps[unixGroupProp]
    else:
        mergeProps[unixGroupProp] = "ranger"
        groupName = mergeProps[unixGroupProp]

    # Resolve (or create) the unix group id for the configured group.
    try:
        groupId = grp.getgrnam(groupName).gr_gid
    except KeyError, e:
        groupId = createGroup(groupName)
コード例 #49
0
def __get_username():
    """Return the name of the user this process is effectively running as."""
    if sys.platform != 'win32':
        import pwd
        return pwd.getpwuid(os.geteuid()).pw_name
    # No pwd module on Windows; fall back to getpass's heuristics.
    return getpass.getuser()
コード例 #50
0
ファイル: file.py プロジェクト: mjura/salt-1
 def test_chown_no_user(self):
     """file.chown with a nonexistent user must report that it does not exist."""
     missing_user = '******'
     current_group = grp.getgrgid(pwd.getpwuid(os.getuid()).pw_gid).gr_name
     ret = self.run_function('file.chown',
                             arg=[self.myfile, missing_user, current_group])
     self.assertIn('not exist', ret)
コード例 #51
0
def current_user():
    """Return the effective user's login name, via getpass when pwd is unavailable."""
    if not pwd:
        return getpass.getuser()
    return pwd.getpwuid(os.geteuid()).pw_name
コード例 #52
0
ファイル: __init__.py プロジェクト: dangnammta/zato
 def __init__(self, backend_type, default_namespace, session=None):
     """Record the backend settings and resolve the lock implementation
     class registered for *backend_type*."""
     self.session = session
     self.default_namespace = default_namespace
     self.backend_type = backend_type
     # _lock_impl (a class attribute) maps backend type -> lock class.
     self._lock_class = self._lock_impl[backend_type]
     # Login name of the user owning this process.
     self.user_name = getpwuid(os.getuid()).pw_name
コード例 #53
0
ファイル: vc.py プロジェクト: oliver-sanders/rose
    def generate_info_config(self, from_id=None, prefix=None, project=None):
        """Generate a metomi.rose.config.ConfigNode for a rose-suite.info.

        This is suitable for passing into the create method of this
        class.
        If from_id is defined, copy items from it.
        Return the metomi.rose.config.ConfigNode instance.

        """
        from_project = None
        from_title = None
        if from_id is not None:
            from_info_url = "%s/%s/rose-suite.info@%s" % (
                from_id.to_origin(),
                from_id.branch,
                from_id.revision,
            )
            out_data = self.popen("svn", "cat", from_info_url)[0]
            from_config = metomi.rose.config.load(StringIO(out_data.decode()))

        res_loc = ResourceLocator.default()
        older_config = None
        info_config = metomi.rose.config.ConfigNode()

        # Determine project if given as a command-line option on create
        if from_id is None and project is not None:
            info_config.set(["project"], project)

        # Set the compulsory fields and use the project and metadata if
        #  available.
        meta_config = load_meta_config(
            info_config, config_type=metomi.rose.INFO_CONFIG_NAME
        )
        if from_id is None and project is not None:
            for node_keys, node in meta_config.walk(no_ignore=True):
                if isinstance(node.value, dict):
                    continue
                sect, key = node_keys
                value = node.value
                # BUG FIX: sect.translate(None, "=") is the Python 2 API and
                # raises TypeError on Python 3; use replace() to drop "=".
                sect = sect.replace("=", "")
                if key == "compulsory" and value == "true":
                    info_config.set([sect], "")
            info_config.set(["project"], project)
        else:
            if from_project is None:
                info_config.set(["project"], "")
            if from_title is None:
                info_config.set(["title"], "")

        # Determine prefix
        if prefix is None:
            if from_id is None:
                prefix = SuiteId.get_prefix_default()
            else:
                prefix = from_id.prefix

        # Determine owner:
        # 1. From user configuration [rosie-id]prefix-username
        # 2. From username of a matching group in [groups] in
        #    ~/.subversion/servers
        # 3. Current user ID
        owner = res_loc.get_conf().get_value(
            ["rosie-id", "prefix-username." + prefix]
        )
        if not owner and self.subversion_servers_conf:
            servers_conf = metomi.rose.config.load(
                self.subversion_servers_conf
            )
            groups_node = servers_conf.get(["groups"])
            if groups_node is not None:
                prefix_loc = SuiteId.get_prefix_location(prefix)
                prefix_host = urlparse(prefix_loc).hostname
                for key, node in groups_node.value.items():
                    if fnmatch(prefix_host, node.value):
                        owner = servers_conf.get_value([key, "username"])
                        break
        if not owner:
            owner = pwd.getpwuid(os.getuid())[0]
        info_config.set(["owner"], owner)

        # Copy description
        try:
            from_id.to_string_with_version()
            info_config.set(
                ["description"],
                "Copy of %s" % (from_id.to_string_with_version()),
            )
        except AttributeError:
            # No from_id was given; leave the description unset.
            pass

        # Copy fields provided by the user
        try:
            from_config.walk(no_ignore=False)
            for node_keys, node in from_config.walk(no_ignore=False):
                if isinstance(node.value, dict):
                    continue
                sect, key = node_keys
                value = node.value
                if key in ["description", "owner", "access-list"] or (
                    key == "project" and from_project is not None
                ):
                    pass
                else:
                    info_config.set([key], value)
        except UnboundLocalError:
            # from_config is only bound when from_id was supplied.
            pass

        # Determine access list
        access_list_str = res_loc.get_conf().get_value(
            ["rosie-vc", "access-list-default"]
        )
        if access_list_str:
            info_config.set(["access-list"], access_list_str)
        if from_id is None and project is not None:
            for node_keys, node in meta_config.walk(no_ignore=True):
                if isinstance(node.value, dict):
                    continue
                sect, key = node_keys
                value = node.value
                # BUG FIX: same Python 2 translate() call as above.
                sect = sect.replace("=", "")
                if key == "value-hints" or key == "values":
                    reminder = (
                        "please remove all commented hints/lines "
                        + "in the main/top section before saving."
                    )
                    info_config.set(
                        [sect],
                        metomi.rose.variable.array_split(value)[0],
                        comments=[value, reminder],
                    )
        if older_config is not None:
            for node_keys, node in older_config.walk(no_ignore=True):
                if isinstance(node.value, dict):
                    continue
                sect, key = node_keys
                value = node.value
                info_config.set([key], value)

        return info_config
コード例 #54
0
 def _parse(self, file, fp, default_netrc):
     """Parse netrc-format data from *fp* into self.hosts and self.macros.

     *file* is the file name used in error messages; when *default_netrc*
     is true and the platform is POSIX, entries containing passwords are
     subject to ownership/permission checks on the underlying file.
     Raises NetrcParseError on malformed input or unsafe permissions.
     """
     lexer = shlex.shlex(fp)
     # Allow most punctuation inside tokens and disable '#' comment
     # handling so '#'-prefixed tokens can be treated specially below.
     lexer.wordchars += '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
     lexer.commenters = lexer.commenters.replace('#', '')
     while 1:
         saved_lineno = lexer.lineno
         toplevel = tt = lexer.get_token()
         if not tt:
             break
         elif tt[0] == '#':
             # A lone '#' consumes the rest of its line as a comment.
             if lexer.lineno == saved_lineno and len(tt) == 1:
                 lexer.instream.readline()
             continue
         elif tt == 'machine':
             entryname = lexer.get_token()
         elif tt == 'default':
             entryname = 'default'
         elif tt == 'macdef':
             # Macro definition: read raw lines until a blank line.
             entryname = lexer.get_token()
             self.macros[entryname] = []
             lexer.whitespace = ' \t'
             while 1:
                 line = lexer.instream.readline()
                 if not line or line == '\n':
                     lexer.whitespace = ' \t\r\n'
                     break
                 self.macros[entryname].append(line)
             continue
         else:
             raise NetrcParseError('bad toplevel token %r' % tt, file,
                                   lexer.lineno)
         # Parse the follower tokens (login/account/password) of the entry.
         login = ''
         account = password = None
         self.hosts[entryname] = {}
         while 1:
             tt = lexer.get_token()
             if tt.startswith('#') or tt in {
                     '', 'machine', 'default', 'macdef'
             }:
                 # Start of the next entry (or EOF) ends this one; it is
                 # only valid if a password was seen.
                 if password:
                     self.hosts[entryname] = login, account, password
                     lexer.push_token(tt)
                     break
                 else:
                     raise NetrcParseError(
                         'malformed %s entry %s terminated by %s' %
                         (toplevel, entryname, repr(tt)), file,
                         lexer.lineno)
             elif tt == 'login' or tt == 'user':
                 login = lexer.get_token()
             elif tt == 'account':
                 account = lexer.get_token()
             elif tt == 'password':
                 # Security check: a default ~/.netrc holding passwords must
                 # be owned by the current user and unreadable by others.
                 if os.name == 'posix' and default_netrc:
                     prop = os.fstat(fp.fileno())
                     if prop.st_uid != os.getuid():
                         import pwd
                         try:
                             fowner = pwd.getpwuid(prop.st_uid)[0]
                         except KeyError:
                             fowner = 'uid %s' % prop.st_uid
                         try:
                             user = pwd.getpwuid(os.getuid())[0]
                         except KeyError:
                             # NOTE(review): '******' looks like a scrubbed
                             # format string -- '%s' is missing, so this
                             # line would raise TypeError if reached.
                             user = '******' % os.getuid()
                         raise NetrcParseError(
                             '~/.netrc file owner (%s) does not match current user (%s)'
                             % (fowner, user), file, lexer.lineno)
                     if prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
                         raise NetrcParseError(
                             '~/.netrc access too permissive: access permissions must restrict access to only the owner',
                             file, lexer.lineno)
                 password = lexer.get_token()
             else:
                 raise NetrcParseError('bad follower token %r' % tt, file,
                                       lexer.lineno)
コード例 #55
0

def tract_list(repo):
    """Return the tract identifiers for *repo* (currently a fixed single tract)."""
    return ['0']


def patch_list(repo, tract=0):
    """List of patches for the requested repo and tract (currently fixed)."""
    return ['0,0']


def filter_list():
    """Return the six filter names 'u', 'g', 'r', 'i', 'z', 'y' as a list."""
    return list('ugrizy')


# Login name of the invoking user, recorded in the workflow metadata below.
USER = pwd.getpwuid(os.getuid())[0]

# Create an abstract DAG for the Level 2 pipeline.
dax = ADAG("Level_2_Pipeline")

# Add some workflow-level metadata
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())

input_repo = '/global/cscratch1/sd/descdm/DC1/DC1-imsim-dithered'
output_repo = '.'

# Ingest the raw images: one job taking every lsst_a_*.fits* file as input.
ingestImages = Job('ingestImages.py')
file_names = glob.glob(os.path.join(input_repo, 'lsst_a_*.fits*'))
ingestImages.addArguments(input_repo, '--output', output_repo, *file_names)
コード例 #56
0
# Print usage for bad CLI arguments and abort.
# NOTE(review): Python 2 'print' statements; indentation mixes tabs with an
# 8-space line, which Python 2 tolerated (tab expands to 8 columns).
def exit_argumenterr():
	print "[EXIT] Input arguments are not correctly given"
	print "[EXIT] single : python make_"+nametag+".py RUNMODE SAMPLENAME TUNEFILE INPUTPATH"
        print "[EXIT] single [CRABJOB] : python make_"+nametag+".py CRABJOB SAMPLENAME TUNEFILE INPUTPATH NEVENTS NCORES"
	print "[EXIT] multiple : python make_"+nametag+".py RUNMODE LIST.dat"
	sys.exit()

# Explain which hosts support which run modes, then abort.
# NOTE(review): Python 2 'print' statements (tab-indented).
def exit_runhosterr(runmode,hostname):
	print "[EXIT] "+runmode+" could not be submitted to "+hostname
	print "[EXIT] MULTICORE : SNU"
	print "[EXIT] CLUSTER : SNU tamsa & KISTI"
	print "[EXIT] CRABJOB : KNU & KISTI"
	sys.exit()

# Login name of the user running this submission script.
username = pwd.getpwuid(os.getuid()).pw_name

cwd = os.getcwd()
# Directory layout assumption: .../<year>/<datasettag>/<subdir>,
# where datasettag is "<nametag>__<...>".
datasettag = cwd.split("/")[-2]
nametag =datasettag.split("__")[0]
year = cwd.split("/")[-3]

# The first CLI argument selects the run mode; print usage and exit otherwise.
try:
  runmode = sys.argv[1]
except:
  exit_argumenterr()
if (len(sys.argv) == 3):
  if (".dat" in sys.argv[2]):
    multi_flag = True
    inputpath = sys.argv[2]
  else:
コード例 #57
0
ファイル: readdata.py プロジェクト: Laga/dexcTrack3
  def Connect(self):
    """Open the serial port at 115200 baud, retrying once after a delay.

    On a second failure, prints instructions for granting the current user
    access to the port's group.  On success, best-effort clear() and
    flush() the connection.  (Supports both Python 2 and 3; the
    sys.exc_clear() calls only exist on Python 2.)
    """
    try:
        if self._port is None:
            self._port = serial.Serial(port=self._port_name, baudrate=115200, timeout=4.3)
    except serial.SerialException as e:
        if sys.version_info < (3, 0):
            sys.exc_clear()
        try:
            if self._port is None:
                #print ('First attempt failed')
                if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
                    # Trying to access the port file may help make it visible.
                    # For example, on Linux, running 'ls <self._port_name>' helps make
                    # a subsequent serial port access work.
                    try:
                        stat_info = os.stat(self._port_name)
                    except OSError as e:
                        #print ('Connect() - os.stat() : Exception =', e)
                        if sys.version_info < (3, 0):
                            sys.exc_clear()
                        pass
                # Give the device time to settle before the retry.
                time.sleep(15)
                self._port = serial.Serial(port=self._port_name, baudrate=115200, timeout=4.3)

        except serial.SerialException as e:
            if sys.version_info < (3, 0):
                sys.exc_clear()
            # Second failure: assume a permissions problem and tell the user
            # how to join the port's group (commands differ per platform).
            print ('Read/Write permissions missing for', self._port_name)
            if sys.platform == "linux" or sys.platform == "linux2" or sys.platform == "darwin":
                stat_info = os.stat(self._port_name)
                port_gid = stat_info.st_gid
                port_group = grp.getgrgid(port_gid)[0]
                username = pwd.getpwuid(os.getuid())[0]
                print ('\nFor a persistent solution (recommended), run ...')
                if sys.platform == "darwin":
                    print ('\n   sudo dseditgroup -o edit -a', username, '-t user', port_group)
                else:
                    # On Mint, Ubuntu, etc.
                    print ('\n   sudo addgroup', username, port_group)
                    print ('\n   sudo -', username)
                    print ('\n         OR')
                    # On Fedora, Red Hat, etc.
                    print ('\n   sudo usermod -a -G', port_group, username)
                    print ('\n   su -', username)
                print ('\nFor a short term solution, run ...')
                print ('\n   sudo chmod 666', self._port_name,'\n')
    if self._port is not None:
        # Best-effort cleanup of any stale state on the fresh connection.
        try:
            self.clear()
            #print ('Connect() : self.clear()')
        except Exception as e:
            #print ('Connect() - self.clear() : Exception =', e)
            if sys.version_info < (3, 0):
                sys.exc_clear()
            pass

        try:
            self.flush()
            #print ('Connect() : self.flush()')
        except Exception as e:
            #print ('Connect() - self.flush() : Exception =', e)
            if sys.version_info < (3, 0):
                sys.exc_clear()
            pass
コード例 #58
0
    def filemetadata(self, filename):
        """
        Build and return a pax extended-header record describing *filename*.

        Collects the path, size, optional ownership and timestamps from the
        file's stat data, classifies the entry type (regular file, dir,
        fifo, symlink, char/block device, or hard link for an inode already
        archived) and delegates to self._create_pax_generic_header().
        """
        # Normalise separators and strip the leading '/' so archived
        # paths are relative.
        fullname = filename.replace(os.sep, "/")
        fullname = fullname.lstrip("/")

        pax = self.pax_headers.copy()

        pax["path"] = fullname

        # Use lstat() where available so symlinks are recorded, not followed.
        if hasattr(os, "lstat"):
            fstat = os.lstat(filename)
        else:
            fstat = os.stat(filename)

        linkname = ""

        st_mode = fstat.st_mode
        if stat.S_ISREG(st_mode):
            inode = (fstat.st_ino, fstat.st_dev)

            # A multiply-linked inode already seen under another name is
            # stored as a hard link to that first occurrence.
            if fstat.st_nlink > 1 and inode in self.inodes and fullname != self.inodes[
                    inode]:
                type_ = LNKTYPE
                # BUG FIX: was 'self.indes[inode]' (typo), which raised
                # AttributeError whenever a hard link was encountered.
                linkname = self.inodes[inode]
            else:
                type_ = REGTYPE
                if inode[0]:
                    self.inodes[inode] = fullname

        elif stat.S_ISDIR(st_mode):
            type_ = DIRTYPE

        elif stat.S_ISFIFO(st_mode):
            type_ = FIFOTYPE

        elif stat.S_ISLNK(st_mode):
            type_ = SYMTYPE
            linkname = os.readlink(fullname)

        elif stat.S_ISCHR(st_mode):
            type_ = CHRTYPE

        elif stat.S_ISBLK(st_mode):
            type_ = BLKTYPE

        else:
            # NOTE(review): type_ is left unassigned on this path, so the
            # final call raises UnboundLocalError for sockets/unknown types.
            # Confirm intended handling before changing behaviour.
            logger.warn("不支持的文件类型:{}".format(filename))

        if linkname:
            pax["linkpath"] = linkname

        # Size is recorded as an octal string of the byte count.
        pax["size"] = "{:o}".format(fstat.st_size)

        if self.save_permission:
            pax["uid"] = str(fstat.st_uid)
            pax["gid"] = str(fstat.st_gid)

            # Owner/group names are best-effort: pwd/grp are POSIX-only.
            try:
                import pwd
                pax["uname"] = pwd.getpwuid(fstat.st_uid).pw_name
            except ModuleNotFoundError:
                pass

            try:
                import grp
                pax["gname"] = grp.getgrgid(fstat.st_gid).gr_name
            except ModuleNotFoundError:
                pass

        pax["atime"] = str(fstat.st_atime)
        pax["mtime"] = str(fstat.st_mtime)

        # return pax_header data
        return self._create_pax_generic_header(type_, pax)
コード例 #59
0
import os
import pwd
import re
import sys
import tempfile
from typing import List

from . import error, tpg

# Built-in defaults for the runtime configuration; user config overrides these.
PARAMS = {
    "ssh_path": "/usr/bin/ssh",
    "rsh_path": "/usr/bin/rsh",
    "method": "ssh",
    "maxparallel": "0",
    # Default remote user: login name of the effective uid running this process.
    "user": pwd.getpwuid(os.geteuid())[0],
    "format": r"### %d(stat: %s, dur(s): %t):\n%o\n",
}

# Supported remote-execution methods.
METHODS = ["ssh", "rsh"]

# Per-user configuration and plugin directories under $HOME.
__user_dir = os.path.join(os.environ["HOME"], ".tentakel")
__user_plugin_dir = os.path.join(__user_dir, "plugins")


class TConf(tpg.Parser):
    __doc__ = r"""

    set lexer = ContextSensitiveLexer

    token keyword  : '%(keywords)s'  str ;
コード例 #60
0
ファイル: helpers.py プロジェクト: Unkn0wn-MDCLXIV/borg
def uid2user(uid, default=None):
    """Map a numeric uid to its login name, or *default* when unknown."""
    try:
        passwd_entry = pwd.getpwuid(uid)
    except KeyError:
        return default
    return passwd_entry.pw_name