Code Example #1
File: hooks.py Project: cloudbase/zuul-charm
def install():
    subprocess.check_call(['apt-get', 'install', '-y'] + PACKAGES)

    install_from_git(ZUUL_GIT_URL, config('version'))
    install_from_git(GEAR_GIT_URL, GEAR_STABLE_TAG)

    try:
        pwd.getpwnam(ZUUL_USER)
    except KeyError:
        # create Zuul user
        subprocess.check_call(["useradd", "--create-home", ZUUL_USER])

    directories = [ ZUUL_CONF_DIR, ZUUL_SSH_DIR, ZUUL_RUN_DIR, ZUUL_STATE_DIR,
                    ZUUL_GIT_DIR, ZUUL_LOG_DIR, ZUUL_MERGER_RUN_DIR ]
    zuul_user = pwd.getpwnam(ZUUL_USER)
    for directory in directories:
        if not os.path.exists(directory):
            os.mkdir(directory)
        os.chmod(directory, 0755)
        os.chown(directory, zuul_user.pw_uid, zuul_user.pw_gid)

    generate_zuul_ssh_key()

    # generate configuration files
    render_logging_conf()
    render_gearman_logging_conf()
    render_layout()
    render_zuul_conf()
    create_zuul_upstart_services()
    download_openstack_functions()

    configure_apache2()
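
Code Example #1 only calls pwd.getpwnam to test whether the Zuul account already exists before running useradd. For reference, pwd.getpwnam returns a struct_passwd record for a named local account and raises KeyError when the account is missing; a minimal standalone sketch of the fields it exposes (the account name "zuul" is only an illustrative assumption):

import pwd

try:
    entry = pwd.getpwnam("zuul")
except KeyError:
    print("no such user")
else:
    # struct_passwd is also indexable:
    # (pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell)
    print(entry.pw_uid, entry.pw_gid, entry.pw_dir, entry.pw_shell)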
Code Example #2
File: config.py Project: lhm-limux/gosa
    def __init__(self,  config="/etc/gosa/config",  noargs=False):
        # Load default user name for config parsing
        self.__registry['core']['config'] = config
        self.__noargs = noargs
        user = '******'
        group = 'gosa'
        userHome = '/var/lib/gosa'

        if platform.system() != "Windows":
            try:
                userHome = pwd.getpwnam(user).pw_dir
                group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
            except KeyError:
                pass

            self.__registry['core']['user'] = user
            self.__registry['core']['group'] = group
            self.__registry['core']['workdir'] = userHome

        # Load file configuration
        if not self.__noargs:
            self.__parseCmdOptions()
        self.__parseCfgOptions()

        # Overload with command line options
        if not self.__noargs:
            self.__parseCmdOptions()
Code Example #3
def smb4_groupname_is_username(group):
    try:
        pwd.getpwnam(group)
    except KeyError:
        return False

    return True
Code Example #4
File: main.py Project: roshan3133/DevOps
def check_user(user):
  print ("Checking %s user exist or not." % (user))
  try:
    pwd.getpwnam(user)
    return True
  except KeyError:
    return False
Code Example #5
File: YamlDaemon.py Project: paramonov/yaml-server
    def drop_privileges(self, uid_name=None, gid_name=None):
        """ Drop privileges
        
        Found in https://github.com/zedshaw/python-lust/blob/master/lust/unix.py
        """
        if os.getuid() != 0:
            self.logger.warning("Must be root to drop privileges!")
            return
    
        # Get the uid/gid from the name. If no group given, then derive group from uid_name
        if uid_name is None:
            uid_name = "nobody"  # builtin default is nobody
        running_uid = pwd.getpwnam(uid_name).pw_uid
        if gid_name is None:
            running_gid = pwd.getpwnam(uid_name).pw_gid
        else:
            running_gid = grp.getgrnam(gid_name).gr_gid

        self.logger.debug("Running as %r.%r" % (running_uid, running_gid))
    
        # Remove group privileges
        os.setgroups([])
    
        # Try setting the new uid/gid
        os.setgid(running_gid)
        os.setuid(running_uid)
    
        # Ensure a very conservative umask
        os.umask(077)
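
The comments in Code Example #5 spell out the usual ordering when dropping privileges: clear supplementary groups while still root, set the gid, set the uid last, then tighten the umask. A minimal standalone sketch of that order, assuming the process starts as root and that a "nobody" account exists:

import os
import pwd

def drop_to(user="nobody"):
    pw = pwd.getpwnam(user)
    os.setgroups([])       # supplementary groups first, while still root
    os.setgid(pw.pw_gid)   # group id next
    os.setuid(pw.pw_uid)   # user id last; after this, root is gone for good
    os.umask(0o077)        # conservative umask for anything created later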
Code Example #6
    def _init_log_file(self):
        if self._log_file is not None:
            self._log_file.close()

        if os.getuid() == 0 and not is_sudoed:
            logs_dir = SYSTEM_LOGS_DIR
        else:
            logs_dir = USER_LOGS_DIR

        if not os.path.exists(logs_dir):
            os.makedirs(logs_dir)

            # Fix permissions in case we need to create the dir with sudo
            if is_sudoed:
                uid = pwd.getpwnam(usr).pw_uid
                gid = grp.getgrnam(usr).gr_gid
                os.chown(logs_dir, uid, gid)

        log_fn = "{}/{}.log".format(logs_dir, self._app_name)

        # Fix permissions in case we need to create the file with sudo
        if not os.path.isfile(log_fn) and is_sudoed:
            # touch
            with open(log_fn, 'a'):
                pass

            uid = pwd.getpwnam(usr).pw_uid
            gid = grp.getgrnam(usr).gr_gid
            os.chown(log_fn, uid, gid)

        self._log_file = open("{}/{}.log".format(logs_dir, self._app_name), "a")
Code Example #7
File: test_memory_cache.py Project: 3van/sssd
def test_removed_mc(ldap_conn, sanity_rfc2307):
    """
    Regression test for ticket:
    https://fedorahosted.org/sssd/ticket/2726
    """

    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # remove cache without invalidation
    for path in os.listdir(config.MCACHE_PATH):
        os.unlink(config.MCACHE_PATH + "/" + path)

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
Code Example #8
def validate_user(username):
    """ Checks if a user exist. """
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    return True
Code Example #9
File: process.py Project: allfro/python-libnmap
    def sudo_run_background(self, run_as='root'):
        """
        Public method enabling the library's user to run an nmap scan in the
        background with privileges via sudo.
        The sudo configuration should be set manually on the local system,
        otherwise sudo will prompt for a password.
        This method alters the command line by prefixing the sudo command to
        nmap and will then call self.run()

        :param run_as: user name to which the lib needs to sudo to run the scan

        :return: return code from nmap execution
        """
        sudo_user = run_as.split().pop()
        try:
            pwd.getpwnam(sudo_user).pw_uid
        except KeyError:
            raise

        sudo_path = self._whereis("sudo")
        if sudo_path is None:
            raise EnvironmentError(2, "sudo is not installed or "
                                      "could not be found in system path: "
                                      "cannot run nmap with sudo")

        self.__sudo_run = "{0} -u {1}".format(sudo_path, sudo_user)
        super(NmapProcess, self).start()
Code Example #10
File: download_agent.py Project: goldenboy/shared
    def create_file(self):
        """Create a cookie file. ...
        """
        cookie_filename = None
        try:
            cookie_filename = self.filename
        except AttributeError:
            return

        if not cookie_filename:
            return

        # Create the path to the cookie file if it does not exist.
        dir_name = os.path.dirname(cookie_filename)
        user = '******'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
            # Change ownership on the session cookie file so http can access it.
            os.chown(dir_name,
                    pwd.getpwnam(user).pw_uid,
                    pwd.getpwnam(user).pw_gid)

        # Create empty cookie file if not exists
        if not os.path.exists(cookie_filename):
            self.save()
            # Change ownership on the session cookie file so http can access it.
            os.chown(cookie_filename,
                    pwd.getpwnam(user).pw_uid,
                    pwd.getpwnam(user).pw_gid)
        return
Code Example #11
File: bootstrap.py Project: tkelman/mp
def install_buildbot_slave(name, path=None, script_dir='', shell=False, **args):
  username = '******'
  if platform.system() == 'Linux':
    # Create buildbot user if it doesn't exist.
    username = '******'
    import pwd
    try:
      pwd.getpwnam(username)
    except KeyError:
      check_call(['sudo', 'useradd', '--system', '--home', '/var/lib/buildbot',
                  '--create-home', '--shell', '/bin/false', 'buildbot'])
  path = path or os.path.expanduser('~{0}/slave'.format(username))
  if os.path.exists(path):
    return
  pip_install('buildbot-slave', 'buildbot')
  # The password is insecure but it doesn't matter as the buildslaves are
  # not publicly accessible.
  command = [os.path.join(script_dir, 'buildslave'),
             'create-slave', path, args.get('ip', '10.0.2.2'), name, 'pass']
  if not windows:
    command = ['sudo', '-u', username] + command
  check_call(command, shell=shell)
  if windows:
    return
  if args.get('nocron', False):
    return
  pip_install('python-crontab', 'crontab')
  from crontab import CronTab
  cron = CronTab(username)
  cron.new('PATH={0}:/usr/local/bin buildslave start {1}'.format(
    os.environ['PATH'], path)).every_reboot()
  cron.write()
  # Ignore errors from buildslave as the buildbot may not be accessible.
  call(['sudo', '-H', '-u', username, 'buildslave', 'start', path])
Code Example #12
File: setup.py Project: gurjeet/IntelligentMirror
def create_file(filename, user=None, group=None, mode=0755):
    """Create a file in the filesystem with user:group ownership and mode as permissions."""
    try:
        file = open(filename, 'a')
        file.close()
        log(format%("Created file " + filename + " ."))
    except:
        log(format%("Could not create file " + filename + " ."))
        return False
    
    try:
        os.chmod(filename, mode)
        log(format%("Changed mode of file " + filename + " ."))
    except:
        log(format%("Could not change the mode of the file " + filename + " ."))
        return False

    if user == None:
        return True

    user = pwd.getpwnam(user)[2]
    if group != None:
        group = pwd.getpwnam(group)[3]
    else:
        group = user

    try:
        os.chown(filename, user, group)
        log(format%("Changed ownership of file " + filename + " ."))
    except:
        log(format%("Could not change ownership of file " + filename + " ."))
        return False
    return True
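
Code Example #12 resolves the group through the passwd database (pwd.getpwnam(group)[3]), which only works when a user with the same name as the group exists. The pattern used by most other examples on this page resolves users via pwd and groups via grp; a minimal sketch with hypothetical names:

import grp
import os
import pwd

def chown_by_name(path, user="webuser", group="webgroup"):
    uid = pwd.getpwnam(user).pw_uid    # user lookup in the passwd database
    gid = grp.getgrnam(group).gr_gid   # group lookup in the group database
    os.chown(path, uid, gid)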
Code Example #13
File: prerequisites.py Project: PJeBeK/cms
    def install_conf(self):
        """Install configuration files"""
        assert_root()

        print("===== Copying configuration to /usr/local/etc/")
        root = pwd.getpwnam("root")
        cmsuser = pwd.getpwnam("cmsuser")
        makedir(os.path.join(USR_ROOT, "etc"), root, 0o755)
        for conf_file_name in ["cms.conf", "cms.ranking.conf"]:
            conf_file = os.path.join(USR_ROOT, "etc", conf_file_name)
            # Skip if destination is a symlink
            if os.path.islink(conf_file):
                continue
            # If the config exists, check if the user wants to overwrite it
            if os.path.exists(conf_file):
                if not ask("The %s file is already installed, "
                           "type Y to overwrite it: " % (conf_file_name)):
                    continue
            if os.path.exists(os.path.join(".", "config", conf_file_name)):
                copyfile(os.path.join(".", "config", conf_file_name),
                         conf_file, cmsuser, 0o660)
            else:
                conf_file_name = "%s.sample" % conf_file_name
                copyfile(os.path.join(".", "config", conf_file_name),
                         conf_file, cmsuser, 0o660)
Code Example #14
File: test-runner.py Project: LLNL/zfs
def verify_user(user, logger):
    """
    Verify that the specified user exists on this system, and can execute
    sudo without being prompted for a password.
    """
    testcmd = [SUDO, '-n', '-u', user, TRUE]

    if user in Cmd.verified_users:
        return True

    try:
        getpwnam(user)
    except KeyError:
        logger.info("Warning: user '%s' does not exist.", user)
        return False

    p = Popen(testcmd)
    p.wait()
    if p.returncode != 0:
        logger.info("Warning: user '%s' cannot use passwordless sudo.", user)
        return False
    else:
        Cmd.verified_users.append(user)

    return True
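
Code Example #14 combines two independent checks: the account must exist, and it must be able to run a trivial command through passwordless sudo ("sudo -n" fails instead of prompting when a password would be required). A condensed sketch of the same idea, without the caching of verified users:

import pwd
import subprocess

def can_passwordless_sudo(user):
    try:
        pwd.getpwnam(user)
    except KeyError:
        return False
    # "sudo -n" never prompts; a non-zero exit means a password was needed.
    return subprocess.call(["sudo", "-n", "-u", user, "true"]) == 0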
Code Example #15
File: ftp_server.py Project: 0xkag/M2Crypto
 def authorize (self, channel, username, password):
     if string.lower(username) in ['anonymous', 'ftp']:
         import pwd
         try:
             # ok, here we run into lots of confusion.
             # on some os', anon runs under user 'nobody',
             # on others as 'ftp'.  ownership is also critical.
             # need to investigate.
             # linux: new linuxen seem to have nobody's UID=-1,
             #    which is an illegal value.  Use ftp.
             ftp_user_info = pwd.getpwnam ('ftp')
             if string.lower(os.uname()[0]) == 'linux':
                 nobody_user_info = pwd.getpwnam ('ftp')
             else:
                 nobody_user_info = pwd.getpwnam ('nobody')
             channel.read_only = 1
             if self.root is None:
                 self.root = ftp_user_info[5]
             fs = filesys.unix_filesystem (self.root, '/')
             return 1, 'Anonymous Login Successful', fs
         except KeyError:
             return 0, 'Anonymous account not set up', None
     elif self.real_users:
         return unix_authorizer.authorize (
                 self,
                 channel,
                 username,
                 password
                 )
     else:
         return 0, 'User logins not allowed', None
Code Example #16
File: format.py Project: pombredanne/slapos.core
  def create(self):
    """
    Create a user on the system who will be named after the self.name with its
    own group and directory.

    Returns:
        True: if the user creation went right
    """
    # XXX: This method shall be no-op in case if all is correctly setup
    #      This method shall check if all is correctly done
    #      This method shall not reset groups, just add them
    try:
      grp.getgrnam(self.name)
    except KeyError:
      callAndRead(['groupadd', self.name])

    user_parameter_list = ['-d', self.path, '-g', self.name, '-s',
      '/bin/false']
    if self.additional_group_list is not None:
      user_parameter_list.extend(['-G', ','.join(self.additional_group_list)])
    user_parameter_list.append(self.name)
    try:
      pwd.getpwnam(self.name)
    except KeyError:
      user_parameter_list.append('-r')
      callAndRead(['useradd'] + user_parameter_list)
    else:
      callAndRead(['usermod'] + user_parameter_list)

    return True
Code Example #17
File: ls.py Project: alexAubin/elliot
    def touch(self,filename) :
        open(filename, 'a').close()
 
        # Dirty hack
        uid = pwd.getpwnam(self.traineeName).pw_uid
        gid = pwd.getpwnam(self.guideName).pw_uid
        os.chown(filename,uid,gid) 
Code Example #18
File: System.py Project: bloveing/openulteo
    def userExist(name_):
        try:
            pwd.getpwnam(System.local_encode(name_))
        except KeyError:
            return False

        return True
Code Example #19
File: validators.py Project: chriskuehl/ocflib
def user_exists(username):
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    else:
        return True
Code Example #20
File: sysv.py Project: bluejayKR/monasca-agent
    def enable(self):
        """Sets monasca-agent to start on boot.

            Generally this requires running as super user
        """
        # Create monasca-agent user/group if needed
        try:
            user = pwd.getpwnam(self.username)
        except KeyError:
            subprocess.check_call(['useradd', '-r', self.username])
            user = pwd.getpwnam(self.username)

        # Create dirs
        # todo log dir is hardcoded
        for path in (self.log_dir, self.config_dir, '%s/conf.d' % self.config_dir):
            if not os.path.exists(path):
                os.makedirs(path, 0o755)
                os.chown(path, 0, user.pw_gid)
        # the log dir needs to be writable by the user
        os.chown(self.log_dir, user.pw_uid, user.pw_gid)

        # link the init script, then enable
        if not os.path.exists(self.init_script):
            os.symlink(self.init_template, self.init_script)
            os.chmod(self.init_script, 0o755)

        for runlevel in ['2', '3', '4', '5']:
            link_path = '/etc/rc%s.d/S10monasca-agent' % runlevel
            if not os.path.exists(link_path):
                os.symlink(self.init_script, link_path)

        log.info('Enabled {0} service via SysV init script'.format(self.name))
Code Example #21
File: py-ssh-chroot.py Project: dnaeon/py-ssh-chroot
def chroot_create_user(user, user_chroot):
    """
    Creates the user and sets up the home directory

    """
    try:
        # get the user's home directory
        user_home = pwd.getpwnam(user)[5]
        print "=> Updating user's chroot => %s" % user
    except KeyError as e:
        print '=> User does not exists, will create it now ...'
        subprocess.call(['/usr/sbin/adduser', user])
        user_home = pwd.getpwnam(user)[5]

    # TODO: Add users to the SSH_GROUP group
        
    # we need to create the home directory of the user inside the chroot as well
    # and populate it with /etc/skel files
    if not os.path.exists(os.path.join(user_chroot, user_home[1:])):
        print '    * Installing chroot files => skel files for %s' % user
        shutil.copytree('/etc/skel', os.path.join(user_chroot, user_home[1:]))
    else:
        print '    * Re-installing chroot files => skel files for %s' % user
        shutil.rmtree(os.path.join(user_chroot, user_home[1:]))
        shutil.copytree('/etc/skel', os.path.join(user_chroot, user_home[1:]))
Code Example #22
	def test_user_mapping(self):
		"""Test the user mapping file through the DefinedMap class"""
		mapping_string = """
root:bin
bin:root
500:501
0:sync
sync:0"""
		Globals.isdest = 1
		rootid = 0
		binid = pwd.getpwnam('bin')[2]
		syncid = pwd.getpwnam('sync')[2]
		daemonid = pwd.getpwnam('daemon')[2]
		user_group.init_user_mapping(mapping_string)

		assert user_group.UserMap(rootid, 'root') == binid
		assert user_group.UserMap(binid, 'bin') == rootid
		assert user_group.UserMap(0) == syncid
		assert user_group.UserMap(syncid, 'sync') == 0
		assert user_group.UserMap(500) == 501

		assert user_group.UserMap(501) == 501
		assert user_group.UserMap(123, 'daemon') == daemonid

		assert user_group.UserMap.map_acl(29378, 'aoeuth3t2ug89') is None
		assert user_group.UserMap.map_acl(0, 'aoeuth3t2ug89') is syncid

		if 0: code.InteractiveConsole(globals()).interact()
Code Example #23
File: acfg1.py Project: amplifylitco/asiaq
def get_or_create_ids(username, groupname):
    """
    Get the UID and GID for a user and group, creating the user and group if necessary.
    Users are created with no login shell: if they need a shell, downstream init scripts
    should update it.
    """
    try:
        gid = grp.getgrnam(groupname).gr_gid
    except KeyError:
        logger.info("Creating group %s", groupname)
        subprocess.call(['/usr/sbin/groupadd', '-f', groupname])
        gid = grp.getgrnam(groupname).gr_gid
    try:
        uid = pwd.getpwnam(username).pw_uid
    except KeyError:
        logger.info("Creating user %s", username)
        command = '/usr/sbin/adduser'
        command_input = ['--gid', str(gid), '--shell', '/sbin/nologin', username]
        exit_code = subprocess.call([command, '--system'] + command_input)
        # If the above command fails, it's highly likely that we are on a CentOS 5
        # system and it doesn't have the `--system` option; instead it has `-r`.
        if exit_code != 0:
            subprocess.call([command, '-r'] + command_input)
        uid = pwd.getpwnam(username).pw_uid
    return uid, gid
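
A hypothetical usage sketch for the helper in Code Example #23: resolve (or create) the ids once, then apply them to a runtime directory. The account name and path are made-up placeholders:

import os

uid, gid = get_or_create_ids("svc-app", "svc-app")
os.makedirs("/var/lib/svc-app", exist_ok=True)
os.chown("/var/lib/svc-app", uid, gid)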
Code Example #24
File: tortp.py Project: alitalia/stem-tortp
def tor_new_process():
    """
    Drops privileges to TOR_USER user and start a new Tor process
    """
    debian_tor_uid = getpwnam(TOR_USER).pw_uid
    debian_tor_gid = getpwnam(TOR_USER).pw_gid
    os.setgid(debian_tor_gid)
    os.setuid(debian_tor_uid)
    os.setegid(debian_tor_gid)
    os.seteuid(debian_tor_uid)
    os.environ['HOME'] = "/var/lib/tor"

    tor_process = stem.process.launch_tor_with_config(
      config = {
        'SocksPort': '6666',
        'ControlPort': '6969',
        'DNSPort': '9053',
        'DNSListenAddress': '127.0.0.1',
        'AutomapHostsOnResolve': '1',
        'AutomapHostsSuffixes': '.exit,.onion',
        'VirtualAddrNetwork': '10.192.0.0/10',
        'TransPort': '9040',
        'TransListenAddress': '127.0.0.1',
        'AvoidDiskWrites': '1',
        'WarnUnsafeSocks': '1',
      })
Code Example #25
def create_start(user, app_name, _here, home):
    #create bin buat manual start
    filename = "%s/bin/start-%s" % (home, app_name)
    pid_file = "%s/tmp/%s.pid" % (home, app_name)
    log_file = "%s/log/%s" % (home, app_name)
    print ("Start application:", filename)

    uid = getpwnam(user).pw_uid
    gid = getpwnam(user).pw_gid
    create_dir(filename, uid, gid)
    create_dir(pid_file, uid, gid)
    create_dir(log_file, uid, gid)
    create_dir(log_file+'/log', uid, gid)
        
    with open(filename, 'wb') as f:
        f.write('#!/bin/bash\n')
        f.write("cd {home}/iso8583-forwarder\n".format(home=home))
        f.write("python {home}/iso8583-forwarder/iso8583-forwarder.py \\\n".format(home=home))
        f.write("    --log-dir={log_file} \\\n".format(log_file=log_file))
        f.write("    --pid-file={pid_file} $1\n".format(pid_file=pid_file))
        f.close()
    os.chmod(filename, 0755)
    
    #create bin buat service
    filename = "%s/bin/%s" % (home, app_name)
    with open(filename, 'wb') as f:
        f.write('#!/usr/bin/python\n')
        f.write("{home}/bin/start-{app_name} \\\n".format(home=home, app_name=app_name))
        f.close()
    os.chmod(filename, 0755)
Code Example #26
File: freeipa.py Project: Kobus-Smit/AmbariKave
def create_user(user, groups=[], comment='', options=[]):
    """
    Helper function for creating a native linux user and its required groups.

    First tries to create all the required groups. Once done the user will be
    created and added to the group.

    Arguments:
        user (string): name of the user to create
        groups (list): if empty user will be added to its own group, if only
            one entry this will be used as the users primary group, if multiple
            entries the first entry will be the primary group and the rest
            additional groups.
    """
    for group in groups:
        create_group(group)
    try:
        pwd.getpwnam(user)
    except KeyError:
        if len(comment):
            options.extend(['-c', comment])

        if len(groups) == 0:
            subprocess.call(['useradd'] + options + [user])
        elif len(groups) == 1:
            subprocess.call(['useradd', '-g', groups[0]] + options + [user])
        else:
            subprocess.call(['useradd', '-g', groups[0], '-G', ','.join(groups[1:])] + options + [user])
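
A hypothetical call to Code Example #26's create_user, illustrating the group semantics described in its docstring (all names are made up): "hadoop" becomes the primary group, "supergroup" an additional group, and both are created first if missing.

create_user("hdfs", groups=["hadoop", "supergroup"], comment="HDFS service account")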
Code Example #27
File: pymaidentity.py Project: pudquick/pymaIdentity
 def __init__(self, name_or_uid = None):
     # If passed a string, assume user name
     # If passed a number, assume uid
     # If None, leave everything with a value of None
     
     # Initialize everything to None
     for i in self._fields:
         setattr(self, i, None)
     
     # Determine whether we were passed a name or a uid or a User
     if isinstance(name_or_uid, User):
         # Guessing it's a User object - clone the settings
         # Clone if user name or uid present, otherwise None
         if name_or_uid != None:
             if name_or_uid.name is not None:
                 pw_info = pwd.getpwnam(name_or_uid.name)
             else:
                 pw_info = pwd.getpwuid(name_or_uid.uid)
             self._init_with_pwd(pw_info)
     elif isinstance(name_or_uid, (int,long)):
         # Guessing it's a uid
         try:
             pw_info = pwd.getpwuid(name_or_uid)
             self._init_with_pwd(pw_info)
         except KeyError:
             self.uid = None
     elif isinstance(name_or_uid, basestring):
         # Guessing it's a user name
         try:
             pw_info = pwd.getpwnam(name_or_uid)
             self._init_with_pwd(pw_info)
         except KeyError:
             self.name = None
Code Example #28
File: __init__.py Project: kbm1422/husky
def chown(path, user=None, group=None, recursive=False, exclude=None):
    logger.info("chown: path=%s, user=%s, group=%s, recursive=%s, exclude=%s", path, user, group, recursive, exclude)

    uid = pwd.getpwnam(user).pw_uid if user else -1
    gid = pwd.getpwnam(group).pw_gid if group else -1
    for subpath in find(path, False, exclude):
        os.chown(subpath, uid, gid)
Code Example #29
def deploy_files(staging_directory, instance_directory, file_list, username):
    """
    Copies the list of files from the staging directory to the instance directory.
    Will properly set permissions and setgid files based on their type.
    """

    # get uid and gid for default and problem user
    user = getpwnam(username)
    default = getpwnam(deploy_config.DEFAULT_USER)

    for f in file_list:
        # copy the file over, making the directories as needed
        output_path = join(instance_directory, f.path)
        if not os.path.isdir(os.path.dirname(output_path)):
            os.makedirs(os.path.dirname(output_path))
        shutil.copy2(join(staging_directory, f.path), output_path)

        # set the ownership based on the type of file
        if isinstance(f, ProtectedFile) or isinstance(f, ExecutableFile):
            os.chown(output_path, default.pw_uid, user.pw_gid)
        else:
            uid = default.pw_uid if f.user is None else getpwnam(f.user).pw_uid
            gid = default.pw_gid if f.group is None else getgrnam(f.group).gr_gid
            os.chown(output_path, uid, gid)

        # set the permissions appropriately
        os.chmod(output_path, f.permissions)
Code Example #30
File: test_memory_cache.py Project: SSSD/sssd
def test_mc_zero_timeout(ldap_conn, zero_timeout_rfc2307):
    """
    Test that the memory cache is not created at all with memcache_timeout=0
    """
    # No memory cache files must be created
    assert len(os.listdir(config.MCACHE_PATH)) == 0

    ent.assert_passwd_by_name(
        'user1',
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))
    ent.assert_passwd_by_uid(
        1001,
        dict(name='user1', passwd='*', uid=1001, gid=2001,
             gecos='1001', shell='/bin/bash'))

    ent.assert_group_by_name("group1", dict(name="group1", gid=2001))
    ent.assert_group_by_gid(2001, dict(name="group1", gid=2001))
    stop_sssd()

    # sssd is stopped; so the memory cache should not be used
    # in long living clients (py.test in this case)
    with pytest.raises(KeyError):
        pwd.getpwnam('user1')
    with pytest.raises(KeyError):
        pwd.getpwuid(1001)

    with pytest.raises(KeyError):
        grp.getgrnam('group1')
    with pytest.raises(KeyError):
        grp.getgrgid(2001)
Code Example #31
def swift_user(username='******'):
    user = pwd.getpwnam(username)
    return (user.pw_uid, user.pw_gid)
Code Example #32
File: certdb.py Project: thalman/freeipa
    def create_db(self, user=None, group=None, mode=None, backup=False):
        """Create cert DB

        :param user: User owner the secdir
        :param group: Group owner of the secdir
        :param mode: Mode of the secdir
        :param backup: Backup the sedir files
        """
        if mode is not None:
            dirmode = mode
            filemode = mode & 0o666
            pwdfilemode = mode & 0o660
        else:
            dirmode = 0o750
            filemode = 0o640
            pwdfilemode = 0o640

        uid = -1
        gid = -1
        if user is not None:
            uid = pwd.getpwnam(user).pw_uid
        if group is not None:
            gid = grp.getgrnam(group).gr_gid

        if backup:
            for filename in self.backup_filenames:
                ipautil.backup_file(filename)

        if not os.path.exists(self.secdir):
            os.makedirs(self.secdir, dirmode)

        if not os.path.exists(self.pwd_file):
            # Create the password file for this db
            with io.open(os.open(self.pwd_file, os.O_CREAT | os.O_WRONLY,
                                 pwdfilemode),
                         'w',
                         closefd=True) as f:
                f.write(ipautil.ipa_generate_password())
                # flush and sync tempfile inode
                f.flush()
                os.fsync(f.fileno())

        # In case dbtype is auto, let certutil decide which type of DB
        # to create.
        if self.dbtype == 'auto':
            dbdir = self.secdir
        else:
            dbdir = '{}:{}'.format(self.dbtype, self.secdir)
        args = [
            paths.CERTUTIL,
            '-d',
            dbdir,
            '-N',
            '-f',
            self.pwd_file,
            # -@ in case it's an old db and it must be migrated
            '-@',
            self.pwd_file,
        ]
        ipautil.run(args, stdin=None, cwd=self.secdir)
        self._set_filenames(self._detect_dbtype())
        if self.filenames is None:
            # something went wrong...
            raise ValueError("Failed to create NSSDB at '{}'".format(
                self.secdir))

        # Finally fix up perms
        os.chown(self.secdir, uid, gid)
        os.chmod(self.secdir, dirmode)
        tasks.restore_context(self.secdir, force=True)
        for filename in self.filenames:
            if os.path.exists(filename):
                os.chown(filename, uid, gid)
                if filename == self.pwd_file:
                    new_mode = pwdfilemode
                else:
                    new_mode = filemode
                os.chmod(filename, new_mode)
                tasks.restore_context(filename, force=True)
Code Example #33
def sort_file(config, srcpath, dstpath, mediatype, action, infofile, shasum, chown, user, group, file_mode, directory_mode, metainfo_tag, dryrun):
    # Get UID and GID for chowning
    uid = pwd.getpwnam(user)[2]
    gid = grp.getgrnam(group)[2]

    logger(config, ">>> Parsing {}".format(srcpath))
    # Determine if srcpath is a directory, then act recursively
    if os.path.isdir(srcpath):
        for filename in sorted(os.listdir(srcpath)):
            child_filename = '{}/{}'.format(srcpath, filename)
            sort_file(config, child_filename, dstpath, mediatype, action, infofile, shasum, chown, user, group, file_mode, directory_mode, metainfo_tag, dryrun)
        return 0

    logger(config, "Sorting action:   {}".format(action))

    # Get our destination path and filename (media-specific)
    if mediatype == 'tv':
        file_dst_path, file_dst_filename = sort_tv_file(config, srcpath, dstpath)
    if mediatype == 'movie':
        file_dst_path, file_dst_filename = sort_movie_file(config, srcpath, dstpath, metainfo_tag)

    if not file_dst_filename:
        return 1

    # Ensure our dst_path exists or create it
    if not os.path.isdir(file_dst_path) and not dryrun:
        logger(config, "Creating target directory '{}'".format(file_dst_path))
        os.makedirs(file_dst_path)
        if chown:
            os.chown(file_dst_path, uid, gid)
            os.chmod(file_dst_path, int(directory_mode, 8))

    file_dst = '{}/{}'.format(file_dst_path, file_dst_filename)

    if dryrun:
        # Make the output quoted
        srcpath = '"{}"'.format(srcpath)
        file_dst = '"{}"'.format(file_dst)

    # Perform our action
    if action == 'symlink':
        action_cmd = ['ln', '-s', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'hardlink':
        action_cmd = ['ln', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'copy':
        action_cmd = ['cp', '{}'.format(srcpath), '{}'.format(file_dst)]
    if action == 'move':
        action_cmd = ['mv', '{}'.format(srcpath), '{}'.format(file_dst)]
 
    if dryrun:
        logger(config, "Sort command: {}".format(' '.join(action_cmd)))
        return 0

    # Run the action
    logger(config, "Running sort action... ", nl=False)
    process = subprocess.run(action_cmd)
    retcode = process.returncode
    logger(config, "done.")

    if retcode != 0:
        return retcode

    # Create info file
    if infofile:
        logger(config, "Creating info file... ", nl=False)
        infofile_name = '{}.txt'.format(file_dst)
        infofile_contents = [
            "Source filename:  {}".format(os.path.basename(srcpath)),
            "Source directory: {}".format(os.path.dirname(srcpath))
        ]
        with open(infofile_name, 'w') as fh:
            fh.write('\n'.join(infofile_contents))
            fh.write('\n')
        logger(config, "done.")

    # Create sha256sum file
    if shasum:
        logger(config, "Generating shasum file... ", nl=False)
        shasum_name = '{}.sha256sum'.format(file_dst)
        shasum_cmdout = subprocess.run(['sha256sum', '-b', '{}'.format(file_dst)], capture_output=True, encoding='utf8')
        shasum_data = shasum_cmdout.stdout
        if shasum_data:
            shasum_data = shasum_data.strip()
        with open(shasum_name, 'w') as fh:
            fh.write(shasum_data)
            fh.write('\n')
        logger(config, "done.")

    if chown:
        logger(config, "Correcting ownership and permissions... ", nl=False)
        os.chown(file_dst, uid, gid)
        os.chmod(file_dst, int(file_mode, 8))
        if infofile:
            os.chown(infofile_name, uid, gid)
            os.chmod(infofile_name, int(file_mode, 8))
        if shasum:
            os.chown(shasum_name, uid, gid)
            os.chmod(shasum_name, int(file_mode, 8))
        logger(config, "done.")

    return retcode
Code Example #34
File: utilities.py Project: jirkade/getmail6
def uid_of_user(user):
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError as o:
        raise getmailConfigurationError('no such specified user (%s)' % o)
Code Example #35
File: ansimple.py Project: catmin/ansimple
    def apply(self, item):
        data = item[self.provider]
        file_path = data["path"]

        set_mode = False
        if "mode" in data:
            if isinstance(data["mode"], int):
                file_mode = data["mode"]
                # TODO check range
            #elif g+x u+s
            else:
                raise Exception("invalid mode")
            set_mode = True

        change_owner = False
        if "owner" in data: 
            # get user details by username or uid
            if isinstance(data["owner"], int):
                file_owner = getpwuid(data["owner"])
            elif isinstance(data["owner"], str):
                file_owner = getpwnam(data["owner"])
            else:
                raise Exception("invalid data type for user")
            change_owner = True

        change_group = False
        if "group" in data: 
            # get group details by groupname or gid
            if isinstance(data["group"], int):
                file_group = getgrgid(data["group"])
            elif isinstance(data["group"], str):
                file_group = getgrnam(data["group"])
            else:
                raise Exception("invalid data type for group")
            change_group = True

        # content
        write_contents_to_file = False
        if "content" in data:
            content = data["content"]
            write_contents_to_file = True
        elif "template" in data:
            template = data["template"]
            if not os.path.exists(template): raise Exception("template '{0}' not found".format(template))
            self.logger.debug("loading template '{0}'".format(template))
            with open(template, "r") as template_file:
                template = Template(template_file.read())
                if "vars" in data:
                    template_vars = data["vars"]
                else:
                    template_vars = {}
                content = template.substitute(template_vars)
            write_contents_to_file = True
        if write_contents_to_file:
            self.logger.debug("writing to file '{0}'".format(file_path))
            with open(file_path, "w") as outfile:
                outfile.write(content)

        # the file has to exist at this stage
        if not os.path.exists(file_path): raise Exception("file '{0}' not found.".format(file_path))

        # set mode
        if set_mode:
            self.logger.debug("setting mode to {0}".format(file_mode))
            os.chmod(file_path, file_mode) # PermissionError

        # set owner and group
        if change_owner:
            self.logger.debug("setting owner of '{1}' to '{0}'".format(file_owner.pw_name, file_path))
            os.chown(file_path, file_owner.pw_uid, -1) # change only the owner
        if change_group:
            self.logger.debug("setting group of '{1}' to '{0}'".format(file_group.gr_name, file_path))
            os.chown(file_path, -1, file_group.gr_gid) # change only the group

        return
Code Example #36
def getpwnam(name):
    return PWEntry(pwd.getpwnam(name))
Code Example #37
File: horepgd.py Project: AlexMekkering/horepg
                        type=str,
                        default='',
                        help='the password used to login into TVHeadend')
    parser.add_argument('-R',
                        dest='do_radio_epg',
                        action='store_const',
                        const=True,
                        default=False,
                        help='fetch EPG data for radio channels')
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG)
    if (args.daemonize):
        # switch user and do daemonization
        try:
            uid = pwd.getpwnam(args.as_user).pw_uid
            gid = grp.getgrnam(args.as_group).gr_gid
        except KeyError as exc:
            debug(
                'Unable to find the user {0} and group {1} for daemonization'.
                format(args.as_user, args.as_group))
            sys.exit(1)

        pid_fd = open(args.pid_filename, 'w')

        switch_user(uid, gid)
        # switch to syslog
        logging.basicConfig(stream=logging.handlers.SysLogHandler())
        daemonize()
    else:
        pid_fd = open(args.pid_filename, 'w')
Code Example #38
File: topogen.py Project: r00tc0d3/frr
def diagnose_env_linux():
    """
    Run diagnostics in the running environment. Returns `True` when everything
    is ok, otherwise `False`.
    """
    ret = True

    # Test log path exists before installing handler.
    if not os.path.isdir("/tmp"):
        logger.warning("could not find /tmp for logs")
    else:
        os.system("mkdir /tmp/topotests")
        # Log diagnostics to file so it can be examined later.
        fhandler = logging.FileHandler(
            filename="/tmp/topotests/diagnostics.txt")
        fhandler.setLevel(logging.DEBUG)
        fhandler.setFormatter(
            logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s"))
        logger.addHandler(fhandler)

    logger.info("Running environment diagnostics")

    # Load configuration
    config = configparser.ConfigParser(defaults=tgen_defaults)
    pytestini_path = os.path.join(CWD, "../pytest.ini")
    config.read(pytestini_path)

    # Assert that we are running as root
    if os.getuid() != 0:
        logger.error("you must run topotest as root")
        ret = False

    # Assert that we have mininet
    if os.system("which mn >/dev/null 2>/dev/null") != 0:
        logger.error(
            "could not find mininet binary (mininet is not installed)")
        ret = False

    # Assert that we have iproute installed
    if os.system("which ip >/dev/null 2>/dev/null") != 0:
        logger.error("could not find ip binary (iproute is not installed)")
        ret = False

    # Assert that we have gdb installed
    if os.system("which gdb >/dev/null 2>/dev/null") != 0:
        logger.error("could not find gdb binary (gdb is not installed)")
        ret = False

    # Assert that FRR utilities exist
    frrdir = config.get("topogen", "frrdir")
    if not os.path.isdir(frrdir):
        logger.error("could not find {} directory".format(frrdir))
        ret = False
    else:
        try:
            pwd.getpwnam("frr")[2]
        except KeyError:
            logger.warning('could not find "frr" user')

        try:
            grp.getgrnam("frr")[2]
        except KeyError:
            logger.warning('could not find "frr" group')

        try:
            if "frr" not in grp.getgrnam("frrvty").gr_mem:
                logger.error(
                    '"frr" user and group exist, but user is not under "frrvty"'
                )
        except KeyError:
            logger.warning('could not find "frrvty" group')

        for fname in [
                "zebra",
                "ospfd",
                "ospf6d",
                "bgpd",
                "ripd",
                "ripngd",
                "isisd",
                "pimd",
                "ldpd",
                "pbrd",
        ]:
            path = os.path.join(frrdir, fname)
            if not os.path.isfile(path):
                # LDPd is an exception
                if fname == "ldpd":
                    logger.info(
                        "could not find {} in {}".format(fname, frrdir) +
                        "(LDPd tests will not run)")
                    continue

                logger.warning("could not find {} in {}".format(fname, frrdir))
                ret = False
            else:
                if fname != "zebra":
                    continue

                os.system(
                    "{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path))

    # Test MPLS availability
    krel = platform.release()
    if topotest.version_cmp(krel, "4.5") < 0:
        logger.info(
            'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.
            format(krel))

    # Test for MPLS Kernel modules available
    if not topotest.module_present("mpls-router", load=False) != 0:
        logger.info(
            "LDPd tests will not run (missing mpls-router kernel module)")
    if not topotest.module_present("mpls-iptunnel", load=False) != 0:
        logger.info(
            "LDPd tests will not run (missing mpls-iptunnel kernel module)")

    # TODO remove me when we start supporting exabgp >= 4
    try:
        p = os.popen("exabgp -v")
        line = p.readlines()
        version = line[0].split()
        if topotest.version_cmp(version[2], "4") >= 0:
            logger.warning(
                "BGP topologies are still using exabgp version 3, expect failures"
            )
        p.close()

    # We want to catch all exceptions
    # pylint: disable=W0702
    except:
        logger.warning("failed to find exabgp or returned error")

    # After we logged the output to file, remove the handler.
    logger.removeHandler(fhandler)
    fhandler.close()

    return ret
Code Example #39
File: common.py Project: vishnuravichand/wazuh
def ossec_uid():
    return getpwnam("ossec").pw_uid if globals(
    )['_ossec_uid'] is None else globals()['_ossec_uid']
Code Example #40
File: start.py Project: 3rd-Eden/orbited
    #static_files.putChild('orbited.swf', static.File(os.path.join(os.path.dirname(__file__), 'flash', 'orbited.swf')))
    site = server.Site(root)

    _setup_protocols(root)
    _setup_static(root, config.map)
    start_listening(site, config.map, logger)

    # switch uid and gid to configured user and group.
    if os.name == 'posix' and os.getuid() == 0:
        user = config.map['[global]'].get('user')
        group = config.map['[global]'].get('group')
        if user:
            import pwd
            import grp
            try:
                pw = pwd.getpwnam(user)
                uid = pw.pw_uid
                if group:
                    gr = grp.getgrnam(group)
                    gid = gr.gr_gid
                else:
                    gid = pw.pw_gid
                    gr = grp.getgrgid(gid)
                    group = gr.gr_name
            except Exception, e:
                logger.error('Aborting; Unknown user or group: %s' % e)
                sys.exit(1)
            logger.info('switching to user %s (uid=%d) and group %s (gid=%d)' %
                        (user, uid, group, gid))
            os.setgid(gid)
            os.setuid(uid)
Code Example #41
def run_script(form_def, form_values, stdout=None, stderr=None):
    """
    Perform a callback for the form `form_def`. This calls a script.
    `form_values` is a dictionary of validated values as returned by
    FormDefinition.validate(). If form_def.output is of type 'raw', `stdout`
    and `stderr` have to be open filehandles where the output of the
    callback should be written. The output of the script is hooked up to
    the output, depending on the output type.
    """
    # Validate params
    if form_def.output == 'raw' and (stdout is None or stderr is None):
        msg = 'stdout and stderr cannot be none if script output ' \
              'is \'raw\''
        raise ValueError(msg)

    # Pass form values to the script through the environment as strings.
    env = os.environ.copy()
    for key, value in form_values.items():
        env[key] = str(value)

    # Get the user uid, gid and groups we should run as. If the current
    # user is root, we run as the given user or 'nobody' if no user was
    # specified. Otherwise, we run as the user we already are.
    if os.getuid() == 0:
        if form_def.run_as is not None:
            runas_pw = pwd.getpwnam(form_def.run_as)
        else:
            # Run as nobody
            runas_pw = pwd.getpwnam('nobody')
        runas_gr = grp.getgrgid(runas_pw.pw_gid)
        groups = [
            g.gr_gid for g in grp.getgrall() if runas_pw.pw_name in g.gr_mem
        ]
        msg = "Running script as user={0}, gid={1}, groups={2}"
        run_as_fn = run_as(runas_pw.pw_uid, runas_pw.pw_gid, groups)
        log.info(msg.format(runas_pw.pw_name, runas_gr.gr_name, str(groups)))
    else:
        run_as_fn = None
        if form_def.run_as is not None:
            log.critical("Not running as root, so we can't run the "
                         "script as user '{0}'".format(form_def.run_as))

    # If the form output type is 'raw', we directly stream the output to
    # the browser. Otherwise we store it for later displaying.
    if form_def.output == 'raw':
        try:
            proc = subprocess.Popen(form_def.script,
                                    shell=True,
                                    stdout=stdout,
                                    stderr=stderr,
                                    env=env,
                                    close_fds=True,
                                    preexec_fn=run_as_fn)
            stdout, stderr = proc.communicate(input)
            log.info("Exit code: {0}".format(proc.returncode))
            return proc.returncode
        except OSError as err:
            log.exception(err)
            stderr.write(str(err) + '. Please see the log file.')
            return -1
    else:
        try:
            proc = subprocess.Popen(form_def.script,
                                    shell=True,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    env=env,
                                    close_fds=True,
                                    preexec_fn=run_as_fn)
            stdout, stderr = proc.communicate()
            log.info("Exit code: {0}".format(proc.returncode))
            return {
                'stdout': stdout,
                'stderr': stderr,
                'exitcode': proc.returncode
            }
        except OSError as err:
            log.exception(err)
            return {
                'stdout':
                '',
                'stderr':
                'Internal error: {0}. Please see the log '
                'file.'.format(str(err)),
                'exitcode':
                -1
            }
Code Example #42
File: rspamd.py Project: zhangjinde/rspamd
def set_directory_ownership(path, username, groupname):
    if os.getuid() == 0:
        uid = pwd.getpwnam(username).pw_uid
        gid = grp.getgrnam(groupname).gr_gid
        os.chown(path, uid, gid)
Code Example #43
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options


if __name__ == '__main__':
    options = parseargs()
    # Become nobody
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                  'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                  'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    classname = options.classname
    if "." in classname:
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot + 1:]
    else:
        import __main__ as mod
    class_ = getattr(mod, classname)
Code Example #44
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(__file__)))

import config as cfg
import pwd

PWD_UID = 2
PWD_GID = 3

pwd_data = pwd.getpwnam("www-data")
user_uid = pwd_data[PWD_UID]
user_gid = pwd_data[PWD_GID]

curr_path = os.path.dirname(__file__)

cfg.init()

port = os.environ.get("BIND_PORT")

# Server
bind = "127.0.0.1:" + str(port)

# Worker Processes
worker = 1

# Server Mechanics
# user = int(user_uid)
# group = int(user_gid)
Code Example #45
File: conf.py Project: tomyanchan/carbon
    def postOptions(self):
        global settings

        program = self.parent.subCommand

        # Use provided pidfile (if any) as default for configuration. If it's
        # set to 'twistd.pid', that means no value was provided and the default
        # was used.
        pidfile = self.parent["pidfile"]
        if pidfile.endswith("twistd.pid"):
            pidfile = None
        self["pidfile"] = pidfile

        # Enforce a default umask of '022' if none was set.
        if not self.parent.has_key("umask") or self.parent["umask"] is None:
            self.parent["umask"] = 022

        # Read extra settings from the configuration file.
        program_settings = read_config(program, self)
        settings.update(program_settings)
        settings["program"] = program

        # Set process uid/gid by changing the parent config, if a user was
        # provided in the configuration file.
        if settings.USER:
            self.parent["uid"], self.parent["gid"] = (
                pwd.getpwnam(settings.USER)[2:4])

        # Set the pidfile in parent config to the value that was computed by
        # C{read_config}.
        self.parent["pidfile"] = settings["pidfile"]

        storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
        if not exists(storage_schemas):
            print "Error: missing required config %s" % storage_schemas
            sys.exit(1)

        if settings.WHISPER_AUTOFLUSH:
            log.msg("Enabling Whisper autoflush")
            whisper.AUTOFLUSH = True

        if settings.WHISPER_FALLOCATE_CREATE:
            if whisper.CAN_FALLOCATE:
                log.msg("Enabling Whisper fallocate support")
            else:
                log.err("WHISPER_FALLOCATE_CREATE is enabled but linking failed.")

        if settings.WHISPER_LOCK_WRITES:
            if whisper.CAN_LOCK:
                log.msg("Enabling Whisper file locking")
                whisper.LOCK = True
            else:
                log.err("WHISPER_LOCK_WRITES is enabled but import of fcntl module failed.")

        if not "action" in self:
            self["action"] = "start"
        self.handleAction()

        # If we are not running in debug mode or non-daemon mode, then log to a
        # directory, otherwise log output will go to stdout. If parent options
        # are set to log to syslog, then use that instead.
        if not self["debug"]:
            if self.parent.get("syslog", None):
                log.logToSyslog(self.parent["prefix"])
            elif not self.parent["nodaemon"]:
                logdir = settings.LOG_DIR
                if not isdir(logdir):
                    os.makedirs(logdir)
                    if settings.USER:
                        # We have not yet switched to the specified user,
                        # but that user must be able to create files in this
                        # directory.
                        os.chown(logdir, self.parent["uid"], self.parent["gid"])
                log.logToDir(logdir)

        if self["whitelist"] is None:
            self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
        settings["whitelist"] = self["whitelist"]

        if self["blacklist"] is None:
            self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
        settings["blacklist"] = self["blacklist"]
Code Example #46
def diagnose_env():
    """
    Run diagnostics in the running environment. Returns `True` when everything
    is ok, otherwise `False`.
    """
    ret = True

    # Test log path exists before installing handler.
    if not os.path.isdir('/tmp'):
        logger.warning('could not find /tmp for logs')
    else:
        os.system('mkdir /tmp/topotests')
        # Log diagnostics to file so it can be examined later.
        fhandler = logging.FileHandler(
            filename='/tmp/topotests/diagnostics.txt')
        fhandler.setLevel(logging.DEBUG)
        fhandler.setFormatter(
            logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s'))
        logger.addHandler(fhandler)

    logger.info('Running environment diagnostics')

    # Load configuration
    config = ConfigParser.ConfigParser(tgen_defaults)
    pytestini_path = os.path.join(CWD, '../pytest.ini')
    config.read(pytestini_path)

    # Assert that we are running as root
    if os.getuid() != 0:
        logger.error('you must run topotest as root')
        ret = False

    # Assert that we have mininet
    if os.system('which mn >/dev/null 2>/dev/null') != 0:
        logger.error(
            'could not find mininet binary (mininet is not installed)')
        ret = False

    # Assert that we have iproute installed
    if os.system('which ip >/dev/null 2>/dev/null') != 0:
        logger.error('could not find ip binary (iproute is not installed)')
        ret = False

    # Assert that we have gdb installed
    if os.system('which gdb >/dev/null 2>/dev/null') != 0:
        logger.error('could not find gdb binary (gdb is not installed)')
        ret = False

    # Assert that FRR utilities exist
    frrdir = config.get('topogen', 'frrdir')
    hasfrr = False
    if not os.path.isdir(frrdir):
        logger.error('could not find {} directory'.format(frrdir))
        ret = False
    else:
        hasfrr = True
        try:
            pwd.getpwnam('frr')[2]
        except KeyError:
            logger.warning('could not find "frr" user')

        try:
            grp.getgrnam('frr')[2]
        except KeyError:
            logger.warning('could not find "frr" group')

        try:
            if 'frr' not in grp.getgrnam('frrvty').gr_mem:
                logger.error(
                    '"frr" user and group exist, but user is not under "frrvty"'
                )
        except KeyError:
            logger.warning('could not find "frrvty" group')

        for fname in [
                'zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', 'isisd',
                'pimd', 'ldpd'
        ]:
            path = os.path.join(frrdir, fname)
            if not os.path.isfile(path):
                # LDPd is an exception
                if fname == 'ldpd':
                    logger.info(
                        'could not find {} in {}'.format(fname, frrdir) +
                        '(LDPd tests will not run)')
                    continue

                logger.warning('could not find {} in {}'.format(fname, frrdir))
                ret = False
            else:
                if fname != 'zebra':
                    continue

                os.system(
                    '{} -v 2>&1 >/tmp/topotests/frr_zebra.txt'.format(path))

    # Assert that Quagga utilities exist
    quaggadir = config.get('topogen', 'quaggadir')
    if hasfrr:
        # if we have frr, don't check for quagga
        pass
    elif not os.path.isdir(quaggadir):
        logger.info(
            'could not find {} directory (quagga tests will not run)'.format(
                quaggadir))
    else:
        ret = True
        try:
            pwd.getpwnam('quagga')[2]
        except KeyError:
            logger.info('could not find "quagga" user')

        try:
            grp.getgrnam('quagga')[2]
        except KeyError:
            logger.info('could not find "quagga" group')

        try:
            if 'quagga' not in grp.getgrnam('quaggavty').gr_mem:
                logger.error(
                    '"quagga" user and group exist, but user is not under "quaggavty"'
                )
        except KeyError:
            logger.warning('could not find "quaggavty" group')

        for fname in [
                'zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd', 'isisd',
                'pimd'
        ]:
            path = os.path.join(quaggadir, fname)
            if not os.path.isfile(path):
                logger.warning('could not find {} in {}'.format(
                    fname, quaggadir))
                ret = False
            else:
                if fname != 'zebra':
                    continue

                os.system(
                    '{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt'.format(path))

    # Test MPLS availability
    krel = platform.release()
    if topotest.version_cmp(krel, '4.5') < 0:
        logger.info(
            'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.
            format(krel))

    # Test for MPLS Kernel modules available
    if os.system('/sbin/modprobe -n mpls-router') != 0:
        logger.info(
            'LDPd tests will not run (missing mpls-router kernel module)')
    if os.system('/sbin/modprobe -n mpls-iptunnel') != 0:
        logger.info(
            'LDPd tests will not run (missing mpls-iptunnel kernel module)')

    # TODO remove me when we start supporting exabgp >= 4
    try:
        output = subprocess.check_output(['exabgp', '-v'])
        line = output.split('\n')[0]
        version = line.split(' ')[2]
        if topotest.version_cmp(version, '4') >= 0:
            logger.warning(
                'BGP topologies are still using exabgp version 3, expect failures'
            )

    # We want to catch all exceptions
    # pylint: disable=W0702
    except:
        logger.warning('failed to find exabgp or returned error')

    # After we logged the output to file, remove the handler.
    logger.removeHandler(fhandler)

    return ret
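
The checks above rely on pwd.getpwnam and grp.getgrnam raising KeyError when an account is missing. A minimal, self-contained sketch of that pattern (the helper name check_account is illustrative, not part of the example above):

import pwd
import grp

def check_account(user, group):
    """Return True if both the user and the group exist on this system."""
    ok = True
    try:
        pwd.getpwnam(user)       # raises KeyError if the user is unknown
    except KeyError:
        print('could not find user %s' % user)
        ok = False
    try:
        grp.getgrnam(group)      # raises KeyError if the group is unknown
    except KeyError:
        print('could not find group %s' % group)
        ok = False
    return ok

# Example: check_account('frr', 'frrvty')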
Code example #47
0
    def run_test_case(self,
                      test_file_name,
                      vector,
                      use_db=None,
                      multiple_impalad=False,
                      encoding=None,
                      test_file_vars=None):
        """
    Runs the queries in the specified test based on the vector values

    Runs the query targeting the file format/compression specified in the test
    vector and the exec options specified in the test vector. If multiple_impalad=True
    a connection to a random impalad will be chosen to execute each test section.
    Otherwise, the default impalad client will be used.
    Additionally, the encoding for all test data can be specified using the 'encoding'
    parameter. This is useful when data is ingested in a different encoding (ex.
    latin). If not set, the default system encoding will be used.
    If a dict 'test_file_vars' is provided, then all keys will be replaced with their
    values in queries before they are executed. Callers need to avoid using reserved key
    names, see 'reserved_keywords' below.
    """
        table_format_info = vector.get_value('table_format')
        exec_options = vector.get_value('exec_option')

        # Resolve the current user's primary group name.
        group_id = pwd.getpwnam(getuser()).pw_gid
        group_name = grp.getgrgid(group_id).gr_name

        target_impalad_clients = list()
        if multiple_impalad:
            target_impalad_clients =\
                map(ImpalaTestSuite.create_impala_client, IMPALAD_HOST_PORT_LIST)
        else:
            target_impalad_clients = [self.client]

        # Change the database to reflect the file_format, compression codec etc, or the
        # user specified database for all targeted impalad.
        for impalad_client in target_impalad_clients:
            ImpalaTestSuite.change_database(impalad_client, table_format_info,
                                            use_db,
                                            pytest.config.option.scale_factor)
            impalad_client.set_configuration(exec_options)

        sections = self.load_query_test_file(self.get_workload(),
                                             test_file_name,
                                             encoding=encoding)
        for test_section in sections:
            if 'SHELL' in test_section:
                assert len(test_section) == 1, \
                  "SHELL test sections can't contain other sections"
                cmd = test_section['SHELL']\
                  .replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX)\
                  .replace('$IMPALA_HOME', IMPALA_HOME)
                if use_db: cmd = cmd.replace('$DATABASE', use_db)
                LOG.info("Shell command: " + cmd)
                check_call(cmd, shell=True)
                continue

            if 'QUERY' not in test_section:
                assert 0, 'Error in test file %s. Test cases require a -- QUERY section.\n%s' %\
                    (test_file_name, pprint.pformat(test_section))

            if 'SETUP' in test_section:
                self.execute_test_case_setup(test_section['SETUP'],
                                             table_format_info)

            # TODO: support running query tests against different scale factors
            query = QueryTestSectionReader.build_query(
                test_section['QUERY'].replace(
                    '$GROUP_NAME',
                    group_name).replace('$IMPALA_HOME', IMPALA_HOME).replace(
                        '$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX).replace(
                            '$SECONDARY_FILESYSTEM',
                            os.getenv("SECONDARY_FILESYSTEM") or str()))
            if use_db: query = query.replace('$DATABASE', use_db)

            reserved_keywords = [
                "$DATABASE", "$FILESYSTEM_PREFIX", "$GROUP_NAME",
                "$IMPALA_HOME", "$NAMENODE", "$QUERY", "$SECONDARY_FILESYSTEM"
            ]

            if test_file_vars:
                for key, value in test_file_vars.iteritems():
                    if key in reserved_keywords:
                        raise RuntimeError("Key {0} is reserved".format(key))
                    query = query.replace(key, value)

            if 'QUERY_NAME' in test_section:
                LOG.info('Query Name: \n%s\n' % test_section['QUERY_NAME'])

            # Support running multiple queries within the same test section, only verifying the
            # result of the final query. The main use case is to allow for 'USE database'
            # statements before a query executes, but it is not limited to that.
            # TODO: consider supporting result verification of all queries in the future
            result = None
            target_impalad_client = choice(target_impalad_clients)
            query_options_changed = []
            try:
                user = None
                if 'USER' in test_section:
                    # Create a new client so the session will use the new username.
                    user = test_section['USER'].strip()
                    target_impalad_client = self.create_impala_client()
                for query in query.split(';'):
                    set_pattern_match = SET_PATTERN.match(query)
                    if set_pattern_match != None:
                        query_options_changed.append(
                            set_pattern_match.groups()[0])
                    result = self.__execute_query(target_impalad_client,
                                                  query,
                                                  user=user)
            except Exception as e:
                if 'CATCH' in test_section:
                    self.__verify_exceptions(test_section['CATCH'], str(e),
                                             use_db)
                    continue
                raise
            finally:
                if len(query_options_changed) > 0:
                    self.__restore_query_options(query_options_changed,
                                                 target_impalad_client)

            if 'CATCH' in test_section and '__NO_ERROR__' not in test_section[
                    'CATCH']:
                expected_str = " or ".join(test_section['CATCH']).strip() \
                  .replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX) \
                  .replace('$NAMENODE', NAMENODE) \
                  .replace('$IMPALA_HOME', IMPALA_HOME)
                assert False, "Expected exception: %s" % expected_str

            assert result is not None
            assert result.success

            # Decode the results read back if the data is stored with a specific encoding.
            if encoding:
                result.data = [row.decode(encoding) for row in result.data]
            # Replace $NAMENODE in the expected results with the actual namenode URI.
            if 'RESULTS' in test_section:
                # Combining 'RESULTS' with 'DML_RESULTS" is currently unsupported because
                # __verify_results_and_errors calls verify_raw_results which always checks
                # ERRORS, TYPES, LABELS, etc. which doesn't make sense if there are two
                # different result sets to consider (IMPALA-4471).
                assert 'DML_RESULTS' not in test_section
                self.__verify_results_and_errors(vector, test_section, result,
                                                 use_db)
            else:
                # TODO: Can't validate errors without expected results for now.
                assert 'ERRORS' not in test_section,\
                  "'ERRORS' sections must have accompanying 'RESULTS' sections"
            # If --update_results, then replace references to the namenode URI with $NAMENODE.
            if pytest.config.option.update_results and 'RESULTS' in test_section:
                test_section['RESULTS'] = test_section['RESULTS'] \
                    .replace(NAMENODE, '$NAMENODE') \
                    .replace('$IMPALA_HOME', IMPALA_HOME)
            if 'RUNTIME_PROFILE_%s' % table_format_info.file_format in test_section:
                # If this table format has a RUNTIME_PROFILE section specifically for it, evaluate
                # that section and ignore any general RUNTIME_PROFILE sections.
                verify_runtime_profile(
                    test_section['RUNTIME_PROFILE_%s' %
                                 table_format_info.file_format],
                    result.runtime_profile)
            elif 'RUNTIME_PROFILE' in test_section:
                verify_runtime_profile(test_section['RUNTIME_PROFILE'],
                                       result.runtime_profile)

            if 'DML_RESULTS' in test_section:
                assert 'ERRORS' not in test_section
                # The limit is specified to ensure the queries aren't unbounded. We shouldn't have
                # test files that are checking the contents of tables larger than that anyways.
                dml_results_query = "select * from %s limit 1000" % \
                    test_section['DML_RESULTS_TABLE']
                dml_result = self.__execute_query(target_impalad_client,
                                                  dml_results_query)
                verify_raw_results(
                    test_section,
                    dml_result,
                    vector.get_value('table_format').file_format,
                    pytest.config.option.update_results,
                    result_section='DML_RESULTS')
        if pytest.config.option.update_results:
            output_file = os.path.join(
                EE_TEST_LOGS_DIR,
                test_file_name.replace('/', '_') + ".test")
            write_test_file(output_file, sections, encoding=encoding)
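
The group name substituted for $GROUP_NAME above is derived from the current user's primary group id. A short sketch of just that lookup, assuming it runs on a Unix system:

import pwd
import grp
from getpass import getuser

# Resolve the calling user's primary group name via the passwd and group databases.
group_id = pwd.getpwnam(getuser()).pw_gid
group_name = grp.getgrgid(group_id).gr_name
print(group_name)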
Code example #48
0
File: entry.py Project: devsadds/dockers
   try:
       template_list[template_item]['render'] = template_list[template_item]['template'].\
                                            render(template_list[template_item]['context'])

       # Submit to file

       template_list[template_item]['file'].write(template_list[template_item]['render'].encode('utf8'))
       template_list[template_item]['file'].close()
   except:
       e = sys.exc_info()[0]
       print "Unrecognised exception occured, was unable to create template (returned %s), terminating..." % e
       sys.exit(0) # This should be a return 0 to prevent the container from restarting.

   # Change owner and group
   try:
       template_list[template_item]['uid'] = pwd.getpwnam(template_list[template_item]['user']).pw_uid
   except KeyError as e:
       errormsg = "The user %s does not exist for template %s" % template_list[template_item]['user'], template_item
       errormsg += "(returned %s), terminating..." % e
       print errormsg
       sys.exit(0) # This should be a return 0 to prevent the container from restarting

   try:
       template_list[template_item]['gid'] = grp.getgrnam(template_list[template_item]['group']).gr_gid
   except KeyError as e:
       errormsg = "The group %s does not exist for template %s" % template_list[template_item]['group'], template_item
       errormsg += "(returned %s), terminating..." % e
       print errormsg
       sys.exit(0) # This should be a return 0 to prevent the container from restarting

   try:
Code example #49
0
File: osimage.py Project: kostty/luna
    def pack_boot(self):
        def mount(source, target, fs):
            subprocess.Popen(['/usr/bin/mount', '-t', fs, source, target])
            #ret = ctypes.CDLL('libc.so.6', use_errno=True).mount(source, target, fs, 0, options)
            #if ret < 0:
            #    errno = ctypes.get_errno()
            #    raise RuntimeError("Error mounting {} ({}) on {} with options '{}': {}".
            #        format(source, fs, target, options, os.strerror(errno)))
        def umount(source):
            subprocess.Popen(['/usr/bin/umount', source])
            #ret = ctypes.CDLL('libc.so.6', use_errno=True).umount(source)
            #if ret < 0:
            #    errno = ctypes.get_errno()
            #    raise RuntimeError("Error umounting {}: .".
            #        format(source, os.strerror(errno)))
        def prepare_mounts(path):
            mount('devtmpfs', path + '/dev', 'devtmpfs')
            mount('proc', path + '/proc', 'proc')
            mount('sysfs', path + '/sys', 'sysfs')
        def cleanup_mounts(path):
            umount(path + '/dev')
            umount(path + '/proc')
            umount(path + '/sys')
        cluster = Cluster(mongo_db = self._mongo_db)
        #boot_prefix = '/boot'
        image_path = str(self.get('path'))
        kernver = str(self.get('kernver'))
        tmp_path = '/tmp' # in chroot env
        initrdfile = str(self.name) + '-initramfs-' + kernver
        kernfile = str(self.name) + '-vmlinuz-' + kernver
        #kernel_image = kernel_name + '-' + kernver
        #kernel_path = image_path + boot_prefix +  '/' +  kernel_image
        path = cluster.get('path')
        if not path:
            self._logger.error("Path needs to be configured.")
            return None
        path = str(path)
        user = cluster.get('user')
        if not user:
            self._logger.error("User needs to be configured.")
            return None
        path_to_store = path + "/boot"
        user_id = pwd.getpwnam(user).pw_uid
        grp_id = pwd.getpwnam(user).pw_gid
        if not os.path.exists(path_to_store):
            os.makedirs(path_to_store)
            os.chown(path_to_store, user_id, grp_id)
        modules_add = []
        modules_remove = []
        drivers_add = []
        drivers_remove = []
        dracutmodules = self.get('dracutmodules')
        if dracutmodules:
            dracutmodules = str(dracutmodules)
            modules_add =    sum([['--add', i]      for i in dracutmodules.split(',') if i[0] != '-'], [])
            modules_remove = sum([['--omit', i[1:]] for i in dracutmodules.split(',') if i[0] == '-'], [])
        kernmodules = self.get('kernmodules')
        if kernmodules:
            kernmodules = str(kernmodules)
            drivers_add =    sum([['--add-drivers',  i]     for i in kernmodules.split(',') if i[0] != '-'], [])
            drivers_remove = sum([['--omit-drivers', i[1:]] for i in kernmodules.split(',') if i[0] == '-'], [])
        prepare_mounts(image_path)
        real_root = os.open("/", os.O_RDONLY)
        os.chroot(image_path)

        try:
            dracut_modules = subprocess.Popen(['/usr/sbin/dracut', '--kver', kernver, '--list-modules'], stdout=subprocess.PIPE)
            luna_exists = False
            while dracut_modules.poll() is None:
                line = dracut_modules.stdout.readline()
                if line.strip() == 'luna':
                    luna_exists = True
            if not luna_exists:
                self._logger.error("No luna dracut module in osimage '{}'".format(self.name))
                raise RuntimeError
            dracut_cmd =  ['/usr/sbin/dracut', '--force', '--kver', kernver] + modules_add + modules_remove + drivers_add + drivers_remove + [tmp_path + '/' + initrdfile]
            dracut_create = subprocess.Popen(dracut_cmd, stdout=subprocess.PIPE)
            while dracut_create.poll() is None:
                line = dracut_create.stdout.readline()
        except:
            self._logger.error("Error on building initrd.")
            os.fchdir(real_root)
            os.chroot(".")
            os.close(real_root)
            cleanup_mounts(image_path)
            try:
                pass
                #os.remove(image_path + '/' + tmp_path + '/' + initrdfile)
            except:
                pass
            return None

        os.fchdir(real_root)
        os.chroot(".")
        os.close(real_root)
        cleanup_mounts(image_path)
        shutil.copy(image_path + tmp_path + '/' + initrdfile, path_to_store)
        shutil.copy(image_path + '/boot/vmlinuz-' + kernver, path_to_store + '/' + kernfile)
        os.chown(path_to_store + '/' + initrdfile, user_id, grp_id)
        os.chmod(path_to_store + '/' + initrdfile, 0644)
        os.chown(path_to_store + '/' + kernfile, user_id, grp_id)
        os.chmod(path_to_store + '/' + kernfile, 0644)
        self.set('kernfile', kernfile)
        self.set('initrdfile', initrdfile)
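
pack_boot stores the kernel and initrd under a path owned by the cluster user. A minimal sketch of that create-and-chown step in isolation (the path and user name below are placeholders):

import os
import pwd

def ensure_owned_dir(path, user):
    """Create path if needed and hand ownership to the given user."""
    pw = pwd.getpwnam(user)            # KeyError if the user does not exist
    if not os.path.exists(path):
        os.makedirs(path)
    os.chown(path, pw.pw_uid, pw.pw_gid)

# Example: ensure_owned_dir('/opt/cluster/boot', 'clusteradmin')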
Code example #50
0
#!/usr/bin/python

import os
import pwd
import grp
import json

# DEFINE VARIABLES
bool_path = "/etc/consul.d/filament_bool"
bool_dict = {"is_available": "True"}
uid = pwd.getpwnam('consul').pw_uid
gid = grp.getgrnam('consul').gr_gid

# MAKE AVAILABLE
with open(bool_path, 'w') as outfile:
    json.dump(bool_dict, outfile)

os.chown(bool_path, uid, gid)
Code example #51
0
                else:
                    print >> sys.stderr, (
                        "ERROR: Pidfile exists. Server already running?")
                    sys.exit(1)

            # Get final GIDs
            if os.name != 'nt':
                if options.group is not None:
                    gid = grp.getgrnam(options.group).gr_gid
                elif len(config['server']['group']):
                    gid = grp.getgrnam(config['server']['group']).gr_gid

            # Get final UID
            if os.name != 'nt':
                if options.user is not None:
                    uid = pwd.getpwnam(options.user).pw_uid
                elif len(config['server']['user']):
                    uid = pwd.getpwnam(config['server']['user']).pw_uid

            # Fix up pid permissions
            if not options.foreground and not options.collector:
                # Write pid file
                pid = str(os.getpid())
                try:
                    pf = file(options.pidfile, 'w+')
                except IOError, e:
                    print >> sys.stderr, "Failed to write PID file: %s" % (e)
                    sys.exit(1)
                pf.write("%s\n" % pid)
                pf.close()
                os.chown(options.pidfile, uid, gid)
Code example #52
0
    def uid(self):
        return pwd.getpwnam(self.user).pw_uid
Code example #53
0
File: arrangements.py Project: benhe119/wazuh-qa
import os
import pwd
import grp
import subprocess

config = '<ossec_config><syscheck><directories check_all="yes" realtime="yes" check_owner="yes">/fim_test</directories></syscheck></ossec_config>'
test_file = "/fim_test/check_change_file_perm_test.txt"
test_dir = "/fim_test"

with open("/var/ossec/etc/ossec.conf", "a") as conf:
    conf.write(config)

if not os.path.exists(test_dir):
    os.mkdir(test_dir)

file = open(test_file, "w")
file.close()

uid = pwd.getpwnam("root").pw_uid
gid = grp.getgrnam("root").gr_gid
os.chown(test_file, uid, gid)

try:
    grp.getgrnam('wazuh')
except KeyError:
    os.system("groupadd -g 1005 wazuh")

try:
    pwd.getpwnam('wazuh')
except KeyError:
    os.system("useradd -g 1005 -u 1002 wazuh")

# restart wazuh-agent service
p = subprocess.Popen(["service", "wazuh-agent", "restart"])
Code example #54
0
File: ebd_ipc.py Project: shen390s/pkgcore
def _parse_user(user):
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        pass
    return int(user)
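
_parse_user accepts either a login name or a numeric uid string. A complementary helper for groups would follow the same shape, using grp instead of pwd (a sketch, not taken from the project above):

import grp

def _parse_group(group):
    """Resolve a group name to its gid, falling back to a numeric string."""
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        pass
    return int(group)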
Code example #55
0
File: deploy.py Project: sdlinn/picoCTF
def deploy_problems(args, config):
    """ Main entrypoint for problem deployment """

    global deploy_config, port_map, inv_port_map
    deploy_config = config

    need_restart_xinetd = False

    try:
        user = getpwnam(deploy_config.default_user)
    except KeyError as e:
        logger.info("default_user '%s' does not exist. Creating the user now.",
                    deploy_config.default_user)
        create_user(deploy_config.default_user)

    if args.deployment_directory is not None and (len(args.problem_paths) > 1
                                                  or args.num_instances > 1):
        logger.error(
            "Cannot specify deployment directory if deploying multiple problems or instances."
        )
        raise FatalException

    if args.secret:
        deploy_config.deploy_secret = args.secret
        logger.warning(
            "Overriding deploy_secret with user supplied secret '%s'.",
            args.secret)

    problem_names = args.problem_paths

    if args.bundle:
        bundle_problems = []
        for bundle_path in args.problem_paths:
            if os.path.isfile(bundle_path):
                bundle = get_bundle(bundle_path)
                bundle_problems.extend(bundle["problems"])
            else:
                bundle_sources_path = get_bundle_root(bundle_path,
                                                      absolute=True)
                if os.path.isdir(bundle_sources_path):
                    bundle = get_bundle(bundle_sources_path)
                    bundle_problems.extend(bundle["problems"])
                else:
                    logger.error("Could not find bundle at '%s'.", bundle_path)
                    raise FatalException
        problem_names = bundle_problems

    # before deploying problems, load in port_map and already_deployed instances
    already_deployed = {}
    for path, problem in get_all_problems().items():
        already_deployed[path] = []
        for instance in get_all_problem_instances(path):
            already_deployed[path].append(instance["instance_number"])
            if "port" in instance:
                port_map[instance["port"]] = (problem["name"],
                                              instance["instance_number"])
                inv_port_map[(problem["name"],
                              instance["instance_number"])] = instance["port"]

    lock_file = join(HACKSPORTS_ROOT, "deploy.lock")
    if os.path.isfile(lock_file):
        logger.error(
            "Cannot deploy while other deployment in progress. If you believe this is an error, "
            "run 'shell_manager clean'")
        raise FatalException

    logger.debug("Obtaining deployment lock file %s", lock_file)
    with open(lock_file, "w") as f:
        f.write("1")

    if args.instances:
        instance_list = args.instances
    else:
        instance_list = list(range(0, args.num_instances))

    try:
        for problem_name in problem_names:
            if args.redeploy:
                todo_instance_list = instance_list
            else:
                # remove already deployed instances
                todo_instance_list = list(
                    set(instance_list) -
                    set(already_deployed.get(problem_name, [])))

            if args.dry and isdir(problem_name):
                need_restart_xinetd = deploy_problem(
                    problem_name,
                    instances=todo_instance_list,
                    test=args.dry,
                    deployment_directory=args.deployment_directory,
                    debug=args.debug,
                    restart_xinetd=False)
            elif isdir(join(get_problem_root(problem_name, absolute=True))):
                need_restart_xinetd = deploy_problem(
                    join(get_problem_root(problem_name, absolute=True)),
                    instances=todo_instance_list,
                    test=args.dry,
                    deployment_directory=args.deployment_directory,
                    debug=args.debug,
                    restart_xinetd=False)
            else:
                logger.error("Problem '%s' doesn't appear to be installed.",
                             problem_name)
                raise FatalException
    finally:
        # Restart xinetd unless specified. Service must be manually restarted
        if not args.no_restart and need_restart_xinetd:
            execute(["service", "xinetd", "restart"], timeout=60)

        logger.debug("Releasing lock file %s", lock_file)
        if not args.dry:
            os.remove(lock_file)
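
deploy_problems only creates the default user when pwd.getpwnam raises KeyError. A stripped-down sketch of that guard (ensure_user stands in for the project's create_user helper, and the useradd command is an assumption about the underlying tooling):

import pwd
import subprocess

def ensure_user(username):
    """Create the account only if it does not already exist."""
    try:
        pwd.getpwnam(username)
        return  # already present
    except KeyError:
        pass
    # Assumption: a standard useradd binary is available; real tooling may differ.
    subprocess.check_call(['useradd', '--create-home', username])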
Code example #56
0
class DirSheet(Sheet):
    'Sheet displaying directory, using ENTER to open a particular file.  Edited fields are applied to the filesystem.'
    rowtype = 'files'  # rowdef: (Path, stat)
    commands = [
        Command(ENTER, 'vd.push(openSource(cursorRow[0]))',
                'open current file as a new sheet', 'sheet-open-row'),
        Command('g' + ENTER,
                'for r in selectedRows: vd.push(openSource(r[0].resolve()))',
                'open selected files as new sheets', 'sheet-open-rows'),
        Command('^O', 'launchEditor(cursorRow[0].resolve())',
                'open current file in external $EDITOR', 'edit-row-external'),
        Command('g^O', 'launchEditor(*(r[0].resolve() for r in selectedRows))',
                'open selected files in external $EDITOR',
                'edit-rows-external'),
        Command('^S', 'save()', 'apply all changes on all rows',
                'sheet-specific-apply-edits'),
        Command('z^S', 'save(cursorRow)', 'apply changes to current row',
                'sheet-specific-apply-edits'),
        Command('z^R', 'undoMod(cursorRow); restat(cursorRow)',
                'undo pending changes to current row',
                'sheet-specific-apply-edits'),
        Command(
            'modify-delete-row',
            'if cursorRow not in toBeDeleted: toBeDeleted.append(cursorRow); cursorRowIndex += 1'
        ),
        Command('modify-delete-selected', 'deleteFiles(selectedRows)')
    ]
    columns = [
        # these setters all either raise or return None, so this is a non-idiomatic 'or' to squeeze in a restat
        DeferredSetColumn(
            'directory',
            getter=lambda col, row: row[0].parent.relpath(col.sheet.source.
                                                          resolve()),
            setter=lambda col, row, val: col.sheet.moveFile(row, val)),
        DeferredSetColumn(
            'filename',
            getter=lambda col, row: row[0].name + row[0].ext,
            setter=lambda col, row, val: col.sheet.renameFile(row, val)),
        Column(
            'ext',
            getter=lambda col, row: row[0].is_dir() and '/' or row[0].suffix),
        DeferredSetColumn('size',
                          type=int,
                          getter=lambda col, row: row[1].st_size,
                          setter=lambda col, row, val: os.truncate(
                              row[0].resolve(), int(val))),
        DeferredSetColumn(
            'modtime',
            type=date,
            getter=lambda col, row: row[1].st_mtime,
            setter=lambda col, row, val: os.utime(
                row[0].resolve(), times=((row[1].st_atime, float(val))))),
        DeferredSetColumn(
            'owner',
            width=0,
            getter=lambda col, row: pwd.getpwuid(row[1].st_uid).pw_name,
            setter=lambda col, row, val: os.chown(row[0].resolve(),
                                                  pwd.getpwnam(val).pw_uid, -1
                                                  )),
        DeferredSetColumn(
            'group',
            width=0,
            getter=lambda col, row: grp.getgrgid(row[1].st_gid).gr_name,
            setter=lambda col, row, val: os.chown(row[0].resolve(), -1,
                                                  grp.getgrnam(val).gr_gid)),
        DeferredSetColumn('mode',
                          width=0,
                          type=int,
                          fmtstr='{:o}',
                          getter=lambda col, row: row[1].st_mode),
        Column('filetype',
               width=40,
               cache=True,
               getter=lambda col, row: subprocess.Popen(
                   ['file', '--brief', row[0].resolve()],
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE).communicate()[0].strip()),
    ]
    colorizers = [
        Colorizer('cell', 4, lambda s, c, r, v: s.colorOwner(s, c, r, v)),
        Colorizer(
            'cell', 8, lambda s, c, r, v: options.color_change_pending
            if s.changed(c, r) else None),
        Colorizer(
            'row', 9, lambda s, c, r, v: options.color_delete_pending
            if r in s.toBeDeleted else None),
    ]
    nKeys = 2

    @staticmethod
    def colorOwner(sheet, col, row, val):
        path, st = row
        mode = st.st_mode
        ret = ''
        if col.name == 'group':
            if mode & stat.S_IXGRP: ret = 'bold '
            if mode & stat.S_IWGRP: return ret + 'green'
            if mode & stat.S_IRGRP: return ret + 'yellow'
        elif col.name == 'owner':
            if mode & stat.S_IXUSR: ret = 'bold '
            if mode & stat.S_IWUSR: return ret + 'green'
            if mode & stat.S_IRUSR: return ret + 'yellow'

    def changed(self, col, row):
        return isinstance(col, DeferredSetColumn) and col.changed(row)

    def deleteFiles(self, rows):
        for r in rows:
            if r not in self.toBeDeleted:
                self.toBeDeleted.append(r)

    def moveFile(self, row, val):
        fn = row[0].name + row[0].ext
        newpath = os.path.join(val, fn)
        if not newpath.startswith('/'):
            newpath = os.path.join(self.source.resolve(), newpath)

        parent = Path(newpath).parent
        if parent.exists():
            if not parent.is_dir():
                error('destination %s not a directory' % parent)
        else:
            with contextlib.suppress(FileExistsError):
                os.makedirs(parent.resolve())

        os.rename(row[0].resolve(), newpath)
        row[0] = Path(newpath)
        self.restat(row)

    def renameFile(self, row, val):
        newpath = row[0].with_name(val)
        os.rename(row[0].resolve(), newpath.resolve())
        row[0] = newpath

    def removeFile(self, row):
        path, _ = row
        if path.is_dir():
            os.rmdir(path.resolve())
        else:
            os.remove(path.resolve())

    def undoMod(self, row):
        for col in self.visibleCols:
            if col._cachedValues and id(row) in col._cachedValues:
                del col._cachedValues[id(row)]

        if row in self.toBeDeleted:
            self.toBeDeleted.remove(row)

    def save(self, *rows):
        changes = []
        deletes = {}
        for r in list(
                rows
                or self.rows):  # copy list because elements may be removed
            if r in self.toBeDeleted:
                deletes[id(r)] = r
            else:
                for col in self.visibleCols:
                    if self.changed(col, r):
                        changes.append((col, r))

        if not changes and not deletes:
            error('nothing to save')

        cstr = ''
        if changes:
            cstr += 'change %d attributes' % len(changes)

        if deletes:
            if cstr: cstr += ' and '
            cstr += 'delete %d files' % len(deletes)

        confirm('really %s? ' % cstr)

        self._commit(changes, deletes)

    @asyncthread
    def _commit(self, changes, deletes):
        oldrows = self.rows
        self.rows = []
        for r in oldrows:
            try:
                if id(r) in deletes:
                    self.removeFile(r)
                else:
                    self.rows.append(r)
            except Exception as e:
                exceptionCaught(e)

        for col, row in changes:
            try:
                col.realsetter(col, row, col._cachedValues[id(row)])
                self.restat(row)
            except Exception as e:
                exceptionCaught(e)

    @asyncthread
    def reload(self):
        self.toBeDeleted = []
        self.rows = []
        basepath = self.source.resolve()
        for folder, subdirs, files in os.walk(basepath):
            subfolder = folder[len(basepath) + 1:]
            if subfolder.startswith('.'): continue
            for fn in files:
                if fn.startswith('.'): continue
                p = Path(os.path.join(folder, fn))
                self.rows.append([p, p.stat()])
        self.rows.sort()

    def restat(self, row):
        row[1] = row[0].stat()
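
The owner and group setters above translate names into numeric ids before calling os.chown. The same idea as a tiny standalone helper (a sketch; passing -1 leaves the other id untouched):

import os
import pwd
import grp

def chown_by_name(path, user=None, group=None):
    """Change owner and/or group of path, looking names up with pwd/grp."""
    uid = pwd.getpwnam(user).pw_uid if user else -1
    gid = grp.getgrnam(group).gr_gid if group else -1
    os.chown(path, uid, gid)

# Example: chown_by_name('/var/log/app.log', user='root', group='adm')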
Code example #57
0
File: tools.py Project: lapineige/yunohost
def tools_ldapinit():
    """
    YunoHost LDAP initialization
    """

    with open("/usr/share/yunohost/yunohost-config/moulinette/ldap_scheme.yml"
              ) as f:
        ldap_map = yaml.load(f)

    from yunohost.utils.ldap import _get_ldap_interface

    ldap = _get_ldap_interface()

    for rdn, attr_dict in ldap_map["parents"].items():
        try:
            ldap.add(rdn, attr_dict)
        except Exception as e:
            logger.warn(
                "Error when trying to inject '%s' -> '%s' into ldap: %s" %
                (rdn, attr_dict, e))

    for rdn, attr_dict in ldap_map["children"].items():
        try:
            ldap.add(rdn, attr_dict)
        except Exception as e:
            logger.warn(
                "Error when trying to inject '%s' -> '%s' into ldap: %s" %
                (rdn, attr_dict, e))

    for rdn, attr_dict in ldap_map["depends_children"].items():
        try:
            ldap.add(rdn, attr_dict)
        except Exception as e:
            logger.warn(
                "Error when trying to inject '%s' -> '%s' into ldap: %s" %
                (rdn, attr_dict, e))

    admin_dict = {
        "cn": ["admin"],
        "uid": ["admin"],
        "description": ["LDAP Administrator"],
        "gidNumber": ["1007"],
        "uidNumber": ["1007"],
        "homeDirectory": ["/home/admin"],
        "loginShell": ["/bin/bash"],
        "objectClass":
        ["organizationalRole", "posixAccount", "simpleSecurityObject"],
        "userPassword": ["yunohost"],
    }

    ldap.update("cn=admin", admin_dict)

    # Force nscd to refresh cache to take admin creation into account
    subprocess.call(["nscd", "-i", "passwd"])

    # Check admin actually exists now
    try:
        pwd.getpwnam("admin")
    except KeyError:
        logger.error(m18n.n("ldap_init_failed_to_create_admin"))
        raise YunohostError("installation_failed")

    try:
        # Attempt to create user home folder
        subprocess.check_call(["mkhomedir_helper", "admin"])
    except subprocess.CalledProcessError:
        if not os.path.isdir("/home/{0}".format("admin")):
            logger.warning(m18n.n("user_home_creation_failed"), exc_info=1)

    logger.success(m18n.n("ldap_initialized"))
Code example #58
0
    def __init__(
        self,
        filename,
        mode='a',
        maxBytes=0,
        backupCount=0,
        encoding=None,
        debug=False,
        delay=None,
        use_gzip=False,
        owner=None,
        chmod=None,
        umask=None,
        newline=None,
        terminator="\n",
        unicode_error_policy='ignore',
    ):
        """
        Open the specified file and use it as the stream for logging.

        :param filename: name of the log file to output to.
        :param mode: write mode: defaults to 'a' for text append
        :param maxBytes: rotate the file at this size in bytes
        :param backupCount: number of rotated files to keep before deleting.
        :param encoding: text encoding for logfile
        :param debug: add extra debug statements to this class (for development)
        :param delay: DEPRECATED: value is ignored
        :param use_gzip: automatically gzip rotated logs if available.
        :param owner: 2 element sequence with (user owner, group owner) of log files.  (Unix only)
        :param chmod: permission of log files.  (Unix only)
        :param umask: umask settings to temporarily make when creating log files.
            This is an alternative to chmod. It is mainly for Unix systems but
            can also be used on Windows. The Windows security model is more complex
            and this is not the same as changing access control entries.
        :param newline: None (default): use CRLF on Windows, LF on Unix. Set to '' for
            no translation, in which case the 'terminator' argument determines the line ending.
        :param terminator: set to '\r\n' along with newline='' to force Windows style
            newlines regardless of OS platform.
        :param unicode_error_policy: should be one of 'ignore', 'replace', 'strict'.
            Determines what happens when a message is written to the log that the stream
            encoding doesn't support. Default is to ignore, i.e., drop the unusable characters.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.

        This log handler assumes that all concurrent processes logging to a
        single file are using only this class, and that the exact same
        parameters are provided to each instance of this class.  If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior is
        expected. The same is true if this class is used by one application, but
        the RotatingFileHandler is used by another.
        """
        self.stream = None
        self.stream_lock = None
        self.owner = owner
        self.chmod = chmod
        self.umask = umask
        self._set_uid = None
        self._set_gid = None
        self.use_gzip = True if gzip and use_gzip else False
        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        self.newline = newline

        self._debug = debug
        self.use_gzip = True if gzip and use_gzip else False
        self.gzip_buffer = 8096

        if unicode_error_policy not in ('ignore', 'replace', 'strict'):
            unicode_error_policy = 'ignore'
            warnings.warn(
                "Invalid unicode_error_policy for concurrent_log_handler: "
                "must be ignore, replace, or strict. Defaulting to ignore.",
                UserWarning)
        self.unicode_error_policy = unicode_error_policy

        if delay not in (None, True):
            warnings.warn(
                'parameter delay is now ignored and implied as True, '
                'please remove from your config.', DeprecationWarning)

        # Construct the handler with the given arguments in "delayed" mode
        # because we will handle opening the file as needed. File name
        # handling is done by FileHandler since Python 2.5.
        super(ConcurrentRotatingFileHandler, self).__init__(filename,
                                                            mode,
                                                            encoding=encoding,
                                                            delay=True)

        self.terminator = terminator or "\n"

        if owner and os.chown and pwd and grp:
            self._set_uid = pwd.getpwnam(self.owner[0]).pw_uid
            self._set_gid = grp.getgrnam(self.owner[1]).gr_gid

        self.lockFilename = self.getLockFilename()
        self.is_locked = False
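
When the owner argument is given, the handler resolves it with pwd.getpwnam and grp.getgrnam once, in the constructor, and reuses the ids for every log file it creates. A hedged usage sketch (assuming the class is importable from the concurrent_log_handler package and that the named user and group exist on the host):

import logging
from concurrent_log_handler import ConcurrentRotatingFileHandler

logger = logging.getLogger('app')
handler = ConcurrentRotatingFileHandler(
    '/var/log/app/app.log',
    maxBytes=1024 * 1024,       # rotate at roughly 1 MiB
    backupCount=5,              # keep app.log.1 .. app.log.5
    owner=('www-data', 'adm'),  # resolved via pwd.getpwnam / grp.getgrnam in __init__
    chmod=0o640,
)
logger.addHandler(handler)
logger.warning('hello')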
Code example #59
0
#!/usr/bin/env python

# we want everything in this script to run as the nobody user
# in the auf-d-u group
import pwd, grp, os
nobody = pwd.getpwnam('nobody').pw_uid
group = grp.getgrnam('auf-d-u').gr_gid
os.setgid(group)
os.setuid(nobody)

from django.core.management import execute_manager
from aufusers import settings
if __name__ == "__main__":
    execute_manager(settings)

Code example #60
0
def get_user_fullname(user):
    try:
        return pwd.getpwnam(user)[4]
    except KeyError:
        return '<unknown user name>'
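
Index 4 of the passwd entry is the GECOS field, which usually carries the full name. The same lookup reads more clearly with the named attribute (a sketch):

import pwd

def get_user_fullname(user):
    try:
        # pw_gecos is the same field as index 4 above (full name / comment).
        return pwd.getpwnam(user).pw_gecos
    except KeyError:
        return '<unknown user name>'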