Пример #1
0
def setupLogs(options):
    """Configure root-logger handlers from command-line *options*.

    A StreamHandler is attached when ``options.trace`` is set and a
    FileHandler when ``options.logfile`` is set; both use the level named
    by ``options.loglevel``. Returns the root logger.
    """
    logger = logging.getLogger()

    if options.trace or options.logfile:
        # options.loglevel is a level *name* ('debug', 'INFO', ...);
        # resolve it to the numeric logging constant.
        loglevel = getattr(logging, options.loglevel.upper())

        formatter = logging.Formatter(
            '%(asctime)s %(filename)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S')

        logger.setLevel(loglevel)

        if options.trace:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(loglevel)
            stream_handler.setFormatter(formatter)
            # Consistency fix: the original attached handlers through
            # logging.getLogger(''), which is the very same root logger
            # already held in `logger`.
            logger.addHandler(stream_handler)

        if options.logfile:
            file_handler = logging.FileHandler(options.logfile)
            file_handler.setLevel(loglevel)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)

    # Lazy %-style arguments: the strings are only built when DEBUG is on.
    logger.debug('workdir = %s', options.workdir)
    logger.debug('oedir = %s', options.oedir)
    logger.debug('svnloc = %s', options.svnloc)
    logger.debug('attemptsdir = %s', options.attemptsdir)
    logger.debug('uid = %s = %s', os.getuid(), pwd.getpwuid(os.getuid()).pw_name)
    logger.debug('euid = %s = %s', os.geteuid(), pwd.getpwuid(os.geteuid()).pw_name)
    logger.debug('gid = %s = %s', os.getgid(), grp.getgrgid(os.getgid()).gr_name)
    logger.debug('egid = %s = %s', os.getegid(), grp.getgrgid(os.getegid()).gr_name)

    return logger
Пример #2
0
    def test_executeAsUser_Unix(self):
        """
        Test executing as a different user.
        """
        # Record the identity of the current process so we can verify it is
        # fully restored after the impersonation context exits.
        initial_uid, initial_gid = os.geteuid(), os.getegid()
        initial_groups = os.getgroups()
        test_user = mk.getTestUser(u'normal')
        # Sanity check: we are NOT already running with the test account's
        # supplementary groups, otherwise the assertions below prove nothing.
        self.assertNotEqual(
            sorted(self.getGroupsIDForTestAccount()),
            sorted(os.getgroups()),
            )

        with system_users.executeAsUser(username=test_user.name):
            # Imported lazily: pwd/grp are POSIX-only modules.
            import pwd
            import grp
            uid, gid = os.geteuid(), os.getegid()
            # The .decode() calls imply pw_name/gr_name are bytes here
            # (Python 2 codebase); decode for comparison with the unicode
            # fixture values.
            impersonated_username = pwd.getpwuid(uid)[0].decode('utf-8')
            impersonated_groupname = grp.getgrgid(gid)[0].decode('utf-8')
            impersonated_groups = os.getgroups()
            self.assertEqual(test_user.name, impersonated_username)
            self.assertEqual(TEST_ACCOUNT_GROUP, impersonated_groupname)
            self.assertNotEqual(initial_uid, uid)
            self.assertNotEqual(initial_gid, gid)
            self.assertNotEqual(initial_groups, impersonated_groups)
            if self.os_name != 'osx':
                # On OSX newer than 10.5 get/set groups are useless.
                self.assertEqual(
                    sorted(self.getGroupsIDForTestAccount()),
                    sorted(impersonated_groups),
                    )

        # Leaving the context manager must restore the original identity.
        self.assertEqual(initial_uid, os.geteuid())
        self.assertEqual(initial_gid, os.getegid())
        self.assertEqual(initial_groups, os.getgroups())
Пример #3
0
def access(filename, mode):
    """Emulate ``os.access`` from stat information, using the real uid/gid.

    ``mode`` is ``os.F_OK`` or a bitwise OR of ``os.R_OK``/``os.W_OK``/
    ``os.X_OK``. Returns True only when every requested permission is
    granted.

    BUGFIX: the original OR-ed the owner, group and other permission bits
    together (and even treated S_ISUID/S_ISGID — the setuid/setgid mode
    bits — as permission grants). POSIX selects exactly ONE permission
    class based on ownership: owner bits if the uid matches, else group
    bits if the gid matches, else the "other" bits.
    """
    if mode == os.F_OK:
        return exists(filename)

    st = stat(filename)
    filemode = st.st_mode

    def _class_allows(usr_bit, grp_bit, oth_bit):
        # Owner check takes precedence, then group, then other — no
        # fall-through to a more permissive class.
        if os.getuid() == st.st_uid:
            return bool(filemode & usr_bit)
        if os.getgid() == st.st_gid:
            return bool(filemode & grp_bit)
        return bool(filemode & oth_bit)

    rOK = (_class_allows(statconsts.S_IRUSR, statconsts.S_IRGRP,
                         statconsts.S_IROTH)
           if mode & os.R_OK else True)
    wOK = (_class_allows(statconsts.S_IWUSR, statconsts.S_IWGRP,
                         statconsts.S_IWOTH)
           if mode & os.W_OK else True)
    xOK = (_class_allows(statconsts.S_IXUSR, statconsts.S_IXGRP,
                         statconsts.S_IXOTH)
           if mode & os.X_OK else True)

    return rOK and wOK and xOK
Пример #4
0
 def test_mode(self):
     """ensure_dirs() honours an explicit mode and widens it later."""
     path = pjoin(self.dir, 'mode', 'mode')
     # Creating with a restrictive 0700 mode must apply exactly that
     # mode, owned by our effective uid/gid.
     assert osutils.ensure_dirs(path, mode=0o700)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o700)
     # unrestrict it
     osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Пример #5
0
def juju(host, args):
    """Bootstrap every model on *host*, yield to the caller, then tear down.

    Generator designed to be wrapped as a context manager (yields once):
    the caller runs its workload between bootstrap and cleanup. Cleanup
    restores ownership of host.root, collects logs, and destroys every
    model; the last destroy failure is re-raised.
    """
    run_command('juju --version')
    logging.info("Juju home is set to {}".format(host.tmp_juju_home))
    try:
        for model in host.models:
            run_command(
                'juju bootstrap --show-log -e {} --constraints mem=4G'.format(
                    model))
            run_command('juju set-constraints -e {} mem=2G'.format(model))
        yield
    finally:
        if os.getegid() == 111:
            # gid 111 is assumed to be the jenkins CI account — TODO confirm.
            run_command('sudo chown -R jenkins:jenkins {}'.format(host.root))
        else:
            # BUGFIX: the original passed os.getegid() as the chown *user*
            # and os.getpgrp() (the process group id, unrelated to file
            # ownership) as the *group*. chown expects uid:gid.
            run_command('sudo chown -R {}:{} {}'.format(
                os.geteuid(), os.getegid(), host.root))
        error = None
        copy_remote_logs(host.models, args)
        for model in host.models:
            try:
                run_command(
                    'juju destroy-environment --force --yes {}'.format(model))
            except subprocess.CalledProcessError as e:
                # Remember the failure but keep destroying the other models.
                error = e
                logging.error("Error destroy env failed: {}".format(model))
        if error:
            raise error
Пример #6
0
def juju(host, args):
    """Bootstrap each model on *host* (skipping ones that fail to come up),
    yield to the caller, then collect logs and destroy every bootstrapped
    model.

    Generator designed to be wrapped as a context manager (yields once).
    Unlike the strict variant, bootstrap and log-collection failures are
    logged and tolerated.
    """
    run_command('juju --version')
    logging.info("Juju home is set to {}".format(host.tmp_juju_home))
    bootstrapped = []
    try:
        for model in host.models:
            try:
                run_command(
                    'juju bootstrap --show-log -e {} --constraints mem=4G'.
                    format(model))
                run_command('juju set-constraints -e {} mem=2G'.format(model))
            except subprocess.CalledProcessError:
                # A model that fails to bootstrap is dropped rather than
                # aborting the whole run.
                logging.error('Bootstrapping failed on {}'.format(model))
                continue
            bootstrapped.append(model)
        # From here on only successfully bootstrapped models are tracked.
        host.models = bootstrapped
        yield
    finally:
        if os.getegid() == 111:
            # gid 111 is assumed to be the jenkins CI account — TODO confirm.
            run_command('sudo chown -R jenkins:jenkins {}'.format(host.root))
        else:
            # BUGFIX: the original passed os.getegid() as the chown *user*
            # and os.getpgrp() (the process group id, unrelated to file
            # ownership) as the *group*. chown expects uid:gid.
            run_command('sudo chown -R {}:{} {}'.format(
                os.geteuid(), os.getegid(), host.root))
        try:
            copy_remote_logs(host.models, args)
        except subprocess.CalledProcessError:
            logging.error('Getting logs failed.')
        for model in host.models:
            try:
                run_command(
                    'juju destroy-environment --force --yes {}'.format(model))
            except subprocess.CalledProcessError:
                logging.error("Error destroy env failed: {}".format(model))
Пример #7
0
 def test_create_unwritable_subdir(self):
     path = pjoin(self.dir, 'restricted', 'restricted')
     # create the subdirs without 020 first
     assert osutils.ensure_dirs(os.path.dirname(path))
     assert osutils.ensure_dirs(path, mode=0o020)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o020)
     # unrestrict it
     osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Пример #8
0
def _make_tarinfo(name, size):
    tarinfo = tarfile.TarInfo(name=name)
    tarinfo.size = size
    tarinfo.mtime = time.time()
    tarinfo.mode = 0o660
    tarinfo.type = tarfile.REGTYPE
    tarinfo.uid = os.geteuid()
    tarinfo.gid = os.getegid()
    tarinfo.uname = pwd.getpwuid(os.geteuid()).pw_name
    tarinfo.gname = grp.getgrgid(os.getegid()).gr_name
    return tarinfo
Пример #9
0
    def test_create(self):
        """
        Test basic pool creation.

        :avocado: tags=pool,poolcreate,simplecreate
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []
        modelist = self.params.get("mode", '/run/tests/modes/*')
        mode = modelist[0]
        expected_for_param.append(modelist[1])

        # Each params entry appears to be a (value, 'PASS'|'FAIL') pair;
        # 'valid' is a placeholder meaning "use the caller's own id".
        uidlist = self.params.get("uid", '/run/tests/uids/*', os.geteuid())
        if uidlist[0] == 'valid':
            uid = os.geteuid()
        else:
            uid = uidlist[0]
        expected_for_param.append(uidlist[1])

        gidlist = self.params.get("gid", '/run/tests/gids/*', os.getegid())
        if gidlist[0] == 'valid':
            gid = os.getegid()
        else:
            gid = gidlist[0]
        expected_for_param.append(gidlist[1])

        setidlist = self.params.get("setname", '/run/tests/setnames/*')
        setid = setidlist[0]
        expected_for_param.append(setidlist[1])

        # The overall test is expected to fail if ANY parameter is marked
        # 'FAIL'.
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # Create a 1 GiB pool (1073741824 bytes) with the collected
            # parameters and compare the outcome with the expectation.
            self.pool = DaosPool(self.context)
            self.pool.create(mode, uid, gid, 1073741824, setid)
            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as exc:
            print(exc)
            print(traceback.format_exc())
            if expected_result not in ['FAIL']:
                self.fail("Test was expected to pass but it failed.\n")
Пример #10
0
 def test_gid(self):
     """ensure_dirs() should honour an explicit gid argument."""
     # abuse the portage group as secondary group
     try:
         portage_gid = grp.getgrnam('portage').gr_gid
     except KeyError:
         pytest.skip('the portage group does not exist')
     if portage_gid not in os.getgroups():
         pytest.skip('you are not in the portage group')
     path = pjoin(self.dir, 'group', 'group')
     # An explicit gid must be applied on creation...
     assert osutils.ensure_dirs(path, gid=portage_gid)
     self.check_dir(path, os.geteuid(), portage_gid, 0o777)
     # ...and preserved by a later call without a gid argument...
     assert osutils.ensure_dirs(path)
     self.check_dir(path, os.geteuid(), portage_gid, 0o777)
     # ...but overridden when a different gid is requested.
     assert osutils.ensure_dirs(path, gid=os.getegid())
     self.check_dir(path, os.geteuid(), os.getegid(), 0o777)
Пример #11
0
def is_effective_group(group_id_or_name):
    """Returns True if group_id_or_name is effective group (id/name)."""
    effective_gid = os.getegid()
    # Numeric match first: compare as strings so both int and str inputs
    # are handled the same way.
    if str(group_id_or_name) == str(effective_gid):
        return True
    # Otherwise compare against the group's symbolic name.
    return group_id_or_name == grp.getgrgid(effective_gid).gr_name
Пример #12
0
def CreateTgz(Archive, Filelist):
  """Create a .tgz of the listed files, but take care with ../ files.

Return 0 on success, error code on error."""
  # NOTE: legacy Python 2 code (comma-style `except` clause below).

  # The archive staging happens under $HOME, so bail out early when it
  # is not writable by our effective uid/gid.
  if not Util.CanWrite(os.environ["HOME"], os.geteuid(), os.getegid(), 0):
    CgiUtil.TermError("Can't write.", "No write permissions.",
      "create backup", CgiUtil.FileDetails("home directory",
      os.environ["HOME"]), "Check file permissions in home directory.")
  Files = []
  Parents = []
  for Filename in Filelist:
    SrcFn = os.path.join(os.environ["HOME"], Filename)
    if os.path.isfile(SrcFn):
      # Files located under the "Parent" prefix (but not under "Home")
      # are moved into a subdirectory of $HOME first so tar never has to
      # follow a ../ path.
      # NOTE(review): "%(Parent)s" is used as a *literal* directory name
      # here — it looks like a template that is never interpolated;
      # confirm this is intentional.
      if (Filename[:len(Dict["Parent"])] == Dict["Parent"]) and \
        (Filename[:len(Dict["Home"])] != Dict["Home"]):
        try:
          os.mkdir(os.path.join(os.environ["HOME"], "%(Parent)s"))
        except OSError:
          # Directory already exists.
          pass
        NewFilename = "%(Parent)s" + Filename[len(Dict["Parent"]):]
        DstFn = os.path.join(os.environ["HOME"], NewFilename)
        Parents.append((SrcFn, DstFn))
        os.rename(SrcFn, DstFn)
        Files.append(NewFilename)
      else:
        Files.append(Filename)
  # Run the configured tar binary with $HOME as the working directory.
  TarCmd = [PVars[("NoOverride", "WhichTar")], "-C", os.environ["HOME"],
    "-czf", Archive] + Files
  try:
    Util.RunTask(TarCmd)
  except OSError, ( eno, estr ):
    CgiUtil.TermError("CreateTgz failed.", "Error: %s (%d)" % (estr, eno),
      "create backup", " ".join(TarCmd),
      "Check file permissions in home directory.")
Пример #13
0
def effectively_readable(path):
    """Return True when the *effective* uid/gid of this process may read
    *path*, falling back to os.access() when real == effective ids.
    """
    import os, stat

    real_uid, real_gid = os.getuid(), os.getgid()
    eff_uid, eff_gid = os.geteuid(), os.getegid()

    # This is probably true most of the time, so just let os.access()
    # handle it.  Avoids potential bugs in the rest of this function.
    if (real_uid, real_gid) == (eff_uid, eff_gid):
        return os.access(path, os.R_OK)

    info = os.stat(path)

    # This may be wrong depending on the semantics of your OS.
    # i.e. if the file is -------r--, does the owner have access or not?
    if info.st_uid == eff_uid:
        return info.st_mode & stat.S_IRUSR != 0

    # Same caveat as the owner check above.
    if info.st_gid == eff_gid or info.st_gid in os.getgroups():
        return info.st_mode & stat.S_IRGRP != 0

    return info.st_mode & stat.S_IROTH != 0
Пример #14
0
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If UID and GID is specified, the real user/group is changed.

    If only UID is specified, the real user is changed, and the group is
    changed to the users primary group.

    If only GID is specified, only the group is changed.
    """
    # Windows has no uid/gid concept; nothing to do there.
    if sys.platform == 'win32':
        return
    if os.geteuid():
        # no point trying to setuid unless we're root.
        # (euid != 0 here; a real uid of 0 combined with a non-root euid
        # is an inconsistent state we refuse to run in.)
        if not os.getuid():
            raise SecurityError('contact support')
    # parse_uid/parse_gid presumably resolve names or strings to numeric
    # ids — confirm against their definitions. Falsy input (None/0) is
    # passed through unchanged by the `and` short-circuit.
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)

    if uid:
        _setuid(uid, gid)
    else:
        gid and setgid(gid)

    # Paranoia: verify that the drop actually took effect and we are no
    # longer root in either the real or effective id.
    if uid and not os.getuid() and not os.geteuid():
        raise SecurityError('Still root uid after drop privileges!')
    if gid and not os.getgid() and not os.getegid():
        raise SecurityError('Still root gid after drop privileges!')
Пример #15
0
 def _runAsUser(self, f, *args, **kw):
     """Run callable(s) *f* under this account's effective uid/gid.

     *f* may be a single callable (invoked with *args/**kw) or an
     iterable of (callable, args, kwargs) tuples; the result of the last
     call is returned. Requires the privilege to seteuid(0).
     """
     # Remember the current effective identity so it can be restored.
     # Only the first 16 groups are saved — presumably an NGROUPS
     # limit; confirm.
     euid = os.geteuid()
     egid = os.getegid()
     groups = os.getgroups()[:16]
     uid, gid = self.getUserGroupId()
     # Escalate to root first: only root may switch to an arbitrary
     # uid/gid. gid must be set before uid is dropped.
     os.setegid(0)
     os.seteuid(0)
     os.setgroups(self.getOtherGroups())
     os.setegid(gid)
     os.seteuid(uid)
     try:
         f = iter(f)
     except TypeError:
         # A single callable: normalise to the iterable form.
         f = [(f, args, kw)]
     try:
         for i in f:
             func = i[0]
             args = len(i)>1 and i[1] or ()
             kw = len(i)>2 and i[2] or {}
             r = func(*args, **kw)
     finally:
         # Restore the saved identity, again going through root.
         os.setegid(0)
         os.seteuid(0)
         os.setgroups(groups)
         os.setegid(egid)
         os.seteuid(euid)
     return r
Пример #16
0
def _create_new_key(keystone_user_id, keystone_group_id):
    """Securely create a new encryption key.

    Create a new key that is readable by the Keystone group and Keystone user.
    """
    key = fernet.Fernet.generate_key()  # key is bytes

    # This ensures the key created is not world-readable
    old_umask = os.umask(0o177)
    if keystone_user_id and keystone_group_id:
        # Temporarily impersonate the keystone user/group so the key file
        # is created with the right ownership (gid is switched first,
        # while still privileged enough to do so).
        old_egid = os.getegid()
        old_euid = os.geteuid()
        os.setegid(keystone_group_id)
        os.seteuid(keystone_user_id)
    elif keystone_user_id or keystone_group_id:
        # Only one of the two ids was supplied: refuse to guess the other
        # and leave ownership unchanged.
        LOG.warning(_LW(
            'Unable to change the ownership of the new key without a keystone '
            'user ID and keystone group ID both being provided: %s') %
            CONF.fernet_tokens.key_repository)
    # Determine the file name of the new key
    key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
    try:
        with open(key_file, 'w') as f:
            f.write(key.decode('utf-8'))  # convert key to str for the file.
    finally:
        # After writing the key, set the umask back to its original value. Do
        # the same with group and user identifiers if a Keystone group or user
        # was supplied.
        os.umask(old_umask)
        if keystone_user_id and keystone_group_id:
            # Reverse order of the switch above: euid first, then egid.
            os.seteuid(old_euid)
            os.setegid(old_egid)

    LOG.info(_LI('Created a new key: %s'), key_file)
Пример #17
0
    def fork_worker(self, wid):
        """Fork a worker process for worker id *wid*.

        Returns True when executing in a worker process (including when a
        worker erroneously called this), False in the master after a
        successful fork.
        """
        if not self.is_master:
            self.log.warn("tried to fork a worker from a worker")
            return True

        # Open the worker's health file; if a dedicated worker uid is
        # configured, give that uid ownership (keeping the current group).
        tmpfname = self.control_path(self.HEALTHFILE % wid)
        tmpfd = os.open(tmpfname, os.O_RDONLY)
        if self.worker_uid is not None:
            os.fchown(tmpfd, self.worker_uid, os.getegid())

        pid = os.fork()

        if pid and self.is_master:
            # Parent branch: record the child and return to the master.
            self.log.info("worker forked: %d" % pid)
            self._worker_forked(wid, pid, tmpfd, tmpfname)
            return False

        if self.workers is None:
            # Safety net: a worker process should never reach the fork
            # path; bail out rather than serve twice.
            self.log.error("forked a worker from a worker, exiting")
            sys.exit(1)

        self._worker_postfork(wid, pid, tmpfd)

        # Child: run the serve loop; only returns on shutdown.
        self.server.serve()
        return True
Пример #18
0
    def print_advice(self, keyword):
        """Render the advice plugin registered under *keyword* to stdout.

        Raises ValidationError for an unknown keyword and ScriptError when
        the plugin requires root privileges and we do not have them.
        """
        advice = getattr(advise_api.Advice, keyword, None)

        # Ensure that Configuration class for given --setup option value exists
        if advice is None:
            raise ValidationError(
                name="advice",
                error="No instructions are available for '{con}'. "
                      "See the list of available configuration "
                      "by invoking the ipa-advise command with no argument."
                      .format(con=keyword.replace('_', '-')))

        # Check whether root privileges are needed
        # NOTE(review): this tests the effective *gid*, not the uid;
        # geteuid() == 0 is the conventional root check — confirm whether
        # group 0 is really what is meant here.
        if advice.require_root and os.getegid() != 0:
            raise admintool.ScriptError(
                'Must be root to get advice for {adv}'
                .format(adv=keyword.replace('_', '-')), 1)

        # Print out nicely formatted header
        self.print_header(advice.description, print_shell=True)

        # Set options so that plugin can use verbose/quiet options
        advice.set_options(self.options)

        # Print out the actual advice
        api.Backend.rpcclient.connect()
        advice.get_info()
        api.Backend.rpcclient.disconnect()
        for line in advice.log.content:
            print(line)
Пример #19
0
def connect(ldapi=False, realm=None, fqdn=None, dm_password=None, pw_name=None):
    """Create a connection for updates"""
    # Prefer the local LDAPI socket when requested, otherwise connect to
    # the named host over the network.
    if ldapi:
        conn = ipaldap.IPAdmin(ldapi=True, realm=realm, decode_attrs=False)
    else:
        conn = ipaldap.IPAdmin(fqdn, ldapi=False, realm=realm, decode_attrs=False)
    try:
        # Bind strategy: explicit Directory Manager password wins; when
        # running with egid 0 try autobind first; otherwise use GSSAPI.
        # NOTE(review): the root test uses the effective gid rather than
        # euid — confirm against the autobind configuration.
        if dm_password:
            conn.do_simple_bind(binddn=DN(('cn', 'directory manager')),
                                bindpw=dm_password)
        elif os.getegid() == 0:
            try:
                # autobind
                conn.do_external_bind(pw_name)
            except errors.NotFound:
                # Fall back
                conn.do_sasl_gssapi_bind()
        else:
            conn.do_sasl_gssapi_bind()
    except (ldap.CONNECT_ERROR, ldap.SERVER_DOWN):
        raise RuntimeError("Unable to connect to LDAP server %s" % fqdn)
    except ldap.INVALID_CREDENTIALS:
        raise RuntimeError(
            "The password provided is incorrect for LDAP server %s" % fqdn)
    except ldap.LOCAL_ERROR as e:
        # Surface the SASL/GSSAPI diagnostic string if one is present.
        raise RuntimeError('%s' % e.args[0].get('info', '').strip())
    return conn
Пример #20
0
    def __init__(self, keepAtShutdown=False):
        # We'll put all our temporary stuff under one dir so that we
        # can clean it all up at the end.

        parent_dir = tempfile.tempdir or os.environ.get("TMP", "/tmp")
        prefix = "watchmantest"

        self.temp_dir = path.get_canonical_filesystem_path(
            tempfile.mkdtemp(dir=parent_dir, prefix=prefix)
        )

        if os.name != "nt":
            # On some platforms, setting the setgid bit on a directory doesn't
            # work if the user isn't a member of the directory's group. Set the
            # group explicitly to avoid this.
            os.chown(self.temp_dir, -1, os.getegid())
            # Some environments have a weird umask that can leave state
            # directories too open and break tests.
            os.umask(0o022)
        # Redirect all temporary files to that location
        tempfile.tempdir = self.temp_dir

        # Whether to preserve (rather than delete) the tree at exit.
        self.keep = keepAtShutdown

        def cleanup():
            # Registered with atexit: either keep the tree for debugging or
            # remove it (with retries, via _retry_rmtree).
            if self.keep:
                sys.stdout.write("Preserving output in %s\n" % self.temp_dir)
                return
            self._retry_rmtree(self.temp_dir)

        atexit.register(cleanup)
Пример #21
0
 def test_file(self):
     """
     An existing file has these attributes
     """
     # NOTE: Python 2 code (bare 0777 octal literal, sha1 over a str).
     root = FilePath(self.mktemp())
     root.setContent('the content')
     root.chmod(0777)

     # Run the inspect subcommand against the file and parse its JSON
     # report from stdout.
     stdout, stderr, code = self.runScript(['inspect'], json.dumps({
         'kind': 'file',
         'path': root.path,
     }))
     data = json.loads(stdout)

     self.assertEqual(data['kind'], 'file')
     self.assertEqual(data['path'], root.path)
     self.assertEqual(data['exists'], True)
     self.assertEqual(data['filetype'], 'file')
     # Ownership is reported symbolically, resolved from our effective ids.
     self.assertEqual(data['owner'], pwd.getpwuid(os.geteuid()).pw_name)
     self.assertEqual(data['group'], grp.getgrgid(os.getegid()).gr_name)
     self.assertEqual(data['perms'], '0777')
     # Timestamps must be integers matching a fresh stat of the file.
     root.restat()
     self.assertEqual(data['ctime'], int(root.statinfo.st_ctime))
     self.assertEqual(type(data['ctime']), int)
     self.assertEqual(data['mtime'], int(root.statinfo.st_mtime))
     self.assertEqual(type(data['mtime']), int)
     self.assertEqual(data['atime'], int(root.statinfo.st_atime))
     self.assertEqual(type(data['atime']), int)

     self.assertEqual(data['sha1'], sha1('the content').hexdigest())
     self.assertEqual(data['size'], len('the content'))
Пример #22
0
def connect(ldapi=False, realm=None, fqdn=None, dm_password=None):
    """Create a connection for updates"""
    ldap_uri = ipaldap.get_ldap_uri(fqdn, ldapi=ldapi, realm=realm)
    conn = ipaldap.LDAPClient(ldap_uri, decode_attrs=False)
    try:
        # Bind strategy: explicit Directory Manager password wins; when
        # running with egid 0 try autobind first; otherwise use GSSAPI.
        # NOTE(review): the root test uses the effective gid rather than
        # euid — confirm against the autobind configuration.
        if dm_password:
            conn.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
                             bind_password=dm_password)
        elif os.getegid() == 0:
            try:
                # autobind
                conn.external_bind()
            except errors.NotFound:
                # Fall back
                conn.gssapi_bind()
        else:
            conn.gssapi_bind()
    except (ldap.CONNECT_ERROR, ldap.SERVER_DOWN):
        raise RuntimeError("Unable to connect to LDAP server %s" % fqdn)
    except ldap.INVALID_CREDENTIALS:
        raise RuntimeError(
            "The password provided is incorrect for LDAP server %s" % fqdn)
    except ldap.LOCAL_ERROR as e:
        # Surface the SASL/GSSAPI diagnostic string if one is present.
        raise RuntimeError('%s' % e.args[0].get('info', '').strip())
    return conn
Пример #23
0
    def demote(self, uid):
        """Drop this process's privileges to *uid* and its primary group.

        No-op when already running as *uid*. Logs a warning and returns
        when not running as root (setuid would fail anyway).
        """
        try:
            # Single passwd lookup (the original called getpwuid twice).
            pw_entry = pwd.getpwuid(uid)
            username = pw_entry.pw_name
            gid = pw_entry.pw_gid
        except KeyError:
            # No passwd entry for this uid: fall back to gid == uid.
            username = None
            gid = uid

        if os.getuid() == uid:
            # Already the target user; nothing to do.
            return
        if os.getuid() != 0:
            # logging.warning(): .warn() is a deprecated alias.
            logging.warning('Running as a limited user, setuid() unavailable!')
            return

        logging.info(
            'Worker %s is demoting to UID %s / GID %s...',
            os.getpid(),
            uid,
            gid
        )

        # Supplementary groups: every group that lists the user as a
        # member, plus the primary group itself.
        groups = [
            g.gr_gid
            for g in grp.getgrall()
            if username in g.gr_mem or g.gr_gid == gid
        ]
        os.setgroups(groups)
        # Order matters: the gid must be changed while we are still root.
        os.setgid(gid)
        os.setuid(uid)
        logging.info(
            '...done, new EUID %s EGID %s',
            os.geteuid(),
            os.getegid()
        )
Пример #24
0
 def test_directory(self):
     """
     A directory can exist
     """
     # NOTE: Python 2 code (bare 0777 octal literal).
     root = FilePath(self.mktemp())
     root.makedirs()
     root.chmod(0777)

     # Run the inspect subcommand against the directory and parse its
     # JSON report from stdout.
     stdout, stderr, code = self.runScript(['inspect'], json.dumps({
         'kind': 'file',
         'path': root.path,
     }))
     data = json.loads(stdout)
     self.assertEqual(data['kind'], 'file')
     self.assertEqual(data['path'], root.path)
     self.assertEqual(data['exists'], True)
     self.assertEqual(data['filetype'], 'dir')
     # Ownership is reported symbolically, resolved from our effective ids.
     self.assertEqual(data['owner'], pwd.getpwuid(os.geteuid()).pw_name)
     self.assertEqual(data['group'], grp.getgrgid(os.getegid()).gr_name)
     self.assertEqual(data['perms'], '0777')
     # Timestamps must be integers matching a fresh stat of the directory.
     root.restat()
     self.assertEqual(data['ctime'], int(root.statinfo.st_ctime))
     self.assertEqual(type(data['ctime']), int)
     self.assertEqual(data['mtime'], int(root.statinfo.st_mtime))
     self.assertEqual(type(data['mtime']), int)
     self.assertEqual(data['atime'], int(root.statinfo.st_atime))
     self.assertEqual(type(data['atime']), int)
Пример #25
0
    def _init_backup_repo_path(self):
        """Mount the GlusterFS backup share and make it group-writable.

        Returns the local mount point path.
        """
        remotefsclient = remotefs_brick.RemoteFsClient(
            'glusterfs',
            self._root_helper,
            glusterfs_mount_point_base=self.backup_mount_point_base)
        remotefsclient.mount(self.backup_share)

        # Ensure we can write to this share
        mount_path = remotefsclient.get_mount_point(self.backup_share)

        group_id = os.getegid()
        current_group_id = utils.get_file_gid(mount_path)
        current_mode = utils.get_file_mode(mount_path)

        # Hand group ownership of the mount point to our effective group
        # if it doesn't already own it.
        if group_id != current_group_id:
            cmd = ['chgrp', group_id, mount_path]
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)

        # And make sure the group write bit is actually set.
        if not (current_mode & stat.S_IWGRP):
            cmd = ['chmod', 'g+w', mount_path]
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)

        return mount_path
Пример #26
0
 def entry_point(argv):
     """Print the real and effective uid/gid; always returns exit code 0."""
     # NOTE: Python 2 print statements; *argv* is unused. The name and
     # shape suggest an RPython/PyPy-style entry point — confirm.
     import os
     print "uid is %s" % os.getuid()
     print "euid is %s" % os.geteuid()
     print "gid is %s" % os.getgid()
     print "egid is %s" % os.getegid()
     return 0
Пример #27
0
def createUser():
    """
    Linux user will be created
    """
    import subprocess

    _name = request.form['inputName']

    if find_user(_name):
        return json.dumps({'message':'User already exists !'})

    if not check_valid(_name):
        return json.dumps({'message':'User can be created entered length should be less than 32 !'})

    _password = request.form['inputPassword']

    # SECURITY FIX: hash with a per-user random SHA-512 salt instead of
    # the fixed two-character salt "22" the original used.
    enc_pass = crypt.crypt(_password, crypt.mksalt(crypt.METHOD_SHA512))

    # SECURITY FIX: the original interpolated user-controlled input into
    # os.system() shell strings, allowing shell injection via the
    # username/password fields. Use argument lists with no shell instead.
    # Also check the effective *uid* for root, not the gid.
    if os.geteuid() != 0:
        # Not root: go through sudo, feeding the password on stdin
        # (equivalent of `echo $SUDO_PASSWORD | sudo -S ...`).
        subprocess.run(
            ['sudo', '-S', 'useradd', '-p', enc_pass, _name],
            input=SUDO_PASSWORD + '\n', text=True)
    else:
        subprocess.run(['useradd', '-p', enc_pass, _name])

    return json.dumps({'message':'User created successfully !'})
Пример #28
0
    def __init__(self, stream, gate):
        """Initialize a worker process bound to the given *gate*."""
        self.stream = stream
        self.gate = gate
        # Mark this process as a worker, not the master.
        aj.master = False
        # Detach into our own process group.
        os.setpgrp()
        setproctitle.setproctitle(
            '%s worker [%s]' % (
                sys.argv[0],
                self.gate.name
            )
        )
        # Tag our log output and forward records via send_log_event —
        # presumably back to the master process; confirm.
        set_log_params(tag=self.gate.log_tag)
        init_log_forwarding(self.send_log_event)

        logging.info(
            'New worker "%s" PID %s, EUID %s, EGID %s',
            self.gate.name,
            os.getpid(),
            os.geteuid(),
            os.getegid(),
        )

        # Per-worker request context and HTTP middleware pipeline:
        # authentication first, then dispatch.
        self.context = Context(parent=aj.context)
        self.context.session = self.gate.session
        self.context.worker = self
        self.handler = HttpMiddlewareAggregator([
            AuthenticationMiddleware.get(self.context),
            CentralDispatcher.get(self.context),
        ])

        self._master_config_reloaded = Event()
Пример #29
0
    def __deliver_message_maildir(self, uid, gid, msg, delivered_to, received,
                                  stdout, stderr):
        '''Delivery method run in separate child process.
        '''
        # NOTE: Python 2 code (StandardError, comma-style except clause).
        try:
            if os.name == 'posix':
                # Drop privileges first, then refuse to continue if we are
                # still effectively root — delivering mail as root/GID 0
                # is unsafe.
                if uid:
                    change_uidgid(None, uid, gid)
                if os.geteuid() == 0:
                    raise getmailConfigurationError(
                        'refuse to deliver mail as root'
                    )
                if os.getegid() == 0:
                    raise getmailConfigurationError(
                        'refuse to deliver mail as GID 0'
                    )
            # Write the message into the maildir and report the resulting
            # filename to the parent over stdout before exiting 0.
            f = deliver_maildir(
		self.conf['path'], msg._Message__raw,
                self.hostname, self.dcount, self.conf['filemode']
            )
            stdout.write(f)
            stdout.flush()
            os.fsync(stdout.fileno())
            os._exit(0)
        except StandardError, o:
            # Child process; any error must cause us to exit nonzero for parent
            # to detect it
            stderr.write('maildir delivery process failed (%s)' % o)
            stderr.flush()
            os.fsync(stderr.fileno())
            os._exit(127)
Пример #30
0
    def _continue_original_flow(
            self, r1, w1, r2, w2, namespaces, ns_bind_dir,
            setgroups, maproot, users_map, groups_map):
        """Parent-side half of a namespace-setup handshake.

        Reads the child's pid from pipe *r1*, configures the setgroups
        policy and uid/gid maps for a user namespace, optionally
        bind-mounts the child's namespace files, then signals the child
        over pipe *w2*.
        """
        # "allow" combined with root mapping is rejected as invalid.
        if setgroups == "allow" and maproot:
            raise NamespaceSettingError()

        if maproot:
            # NOTE(review): uid/gid are captured here but never used in
            # this method — possibly dead code; confirm before removing.
            uid = os.geteuid()
            gid = os.getegid()

        # Close the pipe ends that belong to the child.
        os.close(w1)
        os.close(r2)

        # The child sends its pid as ASCII digits over the pipe.
        child_pid = os.read(r1, 64)
        os.close(r1)
        try:
            child_pid = int(child_pid)
        except ValueError:
            raise RuntimeError("failed to get the child pid")

        if "user" in namespaces:
            self.setgroups_control(setgroups, child_pid)
            _write_to_uid_and_gid_map(maproot, users_map,
                                          groups_map, child_pid)

        if ns_bind_dir is not None and "mount" in namespaces:
            self.bind_ns_files(child_pid, namespaces, ns_bind_dir)
        # Signal the child that setup is complete.
        # NOTE(review): chr() here implies Python 2 (os.write requires
        # bytes on Python 3) — confirm the target interpreter.
        os.write(w2, chr(_ACLCHAR))
        os.close(w2)
Пример #31
0
def unzip(zip_file,
          dest,
          excludes=None,
          options=None,
          template=None,
          runas=None,
          trim_output=False,
          password=None,
          extract_perms=True):
    '''
    Uses the ``zipfile`` Python module to unpack zip files

    .. versionchanged:: 2015.5.0
        This function was rewritten to use Python's native zip file support.
        The old functionality has been preserved in the new function
        :mod:`archive.cmd_unzip <salt.modules.archive.cmd_unzip>`. For versions
        2014.7.x and earlier, see the :mod:`archive.cmd_zip
        <salt.modules.archive.cmd_zip>` documentation.

    zip_file
        Path of zip file to be unpacked

    dest
        The destination directory into which the file should be unpacked

    excludes : None
        Comma-separated list of files not to unpack. Can also be passed in a
        Python list.

    options
        This options are only used when ``unzip`` binary is used. In this
        function is ignored.

        .. versionadded:: 2016.3.1

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2

    runas : None
        Unpack the zip file as the specified user. Defaults to the user under
        which the minion is running.

    trim_output : False
        The number of files we should output on success before the rest are trimmed, if this is
        set to True then it will default to 100

    CLI Example:

    .. code-block:: bash

        salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2

    password
        Password to use with password protected zip files

        .. note::
            The password will be present in the events logged to the minion log
            file at the ``debug`` log level. If the minion is logging at
            ``debug`` (or more verbose), then be advised that the password will
            appear in the log.

        .. versionadded:: 2016.3.0

    extract_perms : True
        The Python zipfile_ module does not extract file/directory attributes
        by default. When this argument is set to ``True``, Salt will attempt to
        apply the file permission attributes to the extracted files/folders.

        On Windows, only the read-only flag will be extracted as set within the
        zip file, other attributes (i.e. user/group permissions) are ignored.

        Set this argument to ``False`` to disable this behavior.

        .. versionadded:: 2016.11.0

    .. _zipfile: https://docs.python.org/2/library/zipfile.html

    CLI Example:

    .. code-block:: bash

        salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='******'
    '''
    if not excludes:
        excludes = []
    if runas:
        # Remember the minion's current effective IDs so they can be
        # restored in the finally block after impersonating ``runas``.
        euid = os.geteuid()
        egid = os.getegid()
        uinfo = __salt__['user.info'](runas)
        if not uinfo:
            raise SaltInvocationError(
                "User '{0}' does not exist".format(runas))

    zip_file, dest = _render_filenames(zip_file, dest, None, template)

    if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
        # Change the egid first, as changing it after the euid will fail
        # if the runas user is non-privileged.
        os.setegid(uinfo['gid'])
        os.seteuid(uinfo['uid'])

    try:
        # Define cleaned_files here so that an exception will not prevent this
        # variable from being defined and cause a NameError in the return
        # statement at the end of the function.
        cleaned_files = []
        with contextlib.closing(zipfile.ZipFile(zip_file, "r")) as zfile:
            files = zfile.namelist()

            # Normalize ``excludes`` to a list of strings regardless of how
            # the caller supplied it (CSV string, number, or list).
            if isinstance(excludes, six.string_types):
                excludes = [x.strip() for x in excludes.split(',')]
            elif isinstance(excludes, (float, six.integer_types)):
                excludes = [str(excludes)]

            cleaned_files.extend([x for x in files if x not in excludes])
            for target in cleaned_files:
                # NOTE(review): this membership test looks redundant —
                # cleaned_files was just built to exclude these entries;
                # confirm before removing.
                if target not in excludes:
                    if salt.utils.is_windows() is False:
                        info = zfile.getinfo(target)
                        # Check if zipped file is a symbolic link: the high
                        # 16 bits of external_attr hold the Unix st_mode.
                        if stat.S_ISLNK(info.external_attr >> 16):
                            source = zfile.read(target)
                            os.symlink(source, os.path.join(dest, target))
                            continue
                    zfile.extract(target, dest, password)
                    if extract_perms:
                        # Apply the Unix mode stored in the zip entry; a mode
                        # of 0 means "not recorded", so fall back to a
                        # umask-derived default (dirs end with '/').
                        perm = zfile.getinfo(target).external_attr >> 16
                        if perm == 0:
                            umask_ = salt.utils.files.get_umask()
                            if target.endswith('/'):
                                perm = 0o777 & ~umask_
                            else:
                                perm = 0o666 & ~umask_
                        os.chmod(os.path.join(dest, target), perm)
    except Exception as exc:
        # NOTE(review): the finally block below restores euid/egid again;
        # repeating it here is redundant (the second calls are no-ops) but
        # harmless.
        if runas:
            os.seteuid(euid)
            os.setegid(egid)
        # Wait to raise the exception until euid/egid are restored to avoid
        # permission errors in writing to minion log.
        raise CommandExecutionError(
            'Exception encountered unpacking zipfile: {0}'.format(exc))
    finally:
        # Restore the euid/egid
        if runas:
            os.seteuid(euid)
            os.setegid(egid)

    return _trim_files(cleaned_files, trim_output)
Пример #32
0
 def bind(self, sock):
     """Bind *sock* to the configured unix-socket address.

     The socket file is created under the configured ``umask`` and then
     chowned to the configured ``uid``/``gid`` (defaulting to the current
     effective user and group); the previous umask is restored afterwards.
     """
     prev_mask = os.umask(self.conf.get("umask", 0))
     sock.bind(self.address)
     owner_uid = self.conf.get("uid", os.geteuid())
     owner_gid = self.conf.get("gid", os.getegid())
     util.chown(self.address, owner_uid, owner_gid)
     os.umask(prev_mask)
Пример #33
0
    def _get_filelist_local(loc_list, local_uri, cache):
        """Fill ``loc_list`` with metadata for the local side of a sync.

        For stdin ("-") a placeholder entry is recorded; for a directory the
        tree is walked (honouring ``cfg.follow_symlinks`` / ``cfg.files_from``
        from the enclosing scope); for a single file just that file is listed.
        Each entry maps the file's sync-relative name to its stat data, and
        MD5 checksums are taken from ``cache`` (or computed and cached) when
        'md5' is among ``cfg.sync_checks``.

        NOTE(review): ``cfg``, ``is_src`` and the helper functions
        (``deunicodise``/``unicodise``/``_fswalk_*`` etc.) come from the
        enclosing scope, which is not visible here.

        :param loc_list: mapping-like object to populate (also provides
            ``get_md5``/``record_hardlink``)
        :param local_uri: local URI object describing the source path
        :param cache: MD5 cache keyed by (dev, inode, mtime, size)
        :returns: only for the stdin case, ``(loc_list, True)``; otherwise
            falls off the end (implicitly returns None) — callers appear to
            rely on ``loc_list`` being mutated in place
        """
        info(u"Compiling list of local files...")

        if deunicodise(local_uri.basename()) == "-":
            # Reading from stdin: no real stat data is available; fall back
            # to uid/gid 0 on platforms without geteuid/getegid.
            try:
                uid = os.geteuid()
                gid = os.getegid()
            except:
                uid = 0
                gid = 0
            loc_list["-"] = {
                'full_name_unicode': '-',
                'full_name': '-',
                'size': -1,
                'mtime': -1,
                'uid': uid,
                'gid': gid,
                'dev': 0,
                'inode': 0,
            }
            return loc_list, True
        if local_uri.isdir():
            local_base = deunicodise(local_uri.basename())
            local_path = deunicodise(local_uri.path())
            if is_src and len(cfg.files_from):
                # Explicit file list supplied via --files-from.
                filelist = _get_filelist_from_file(cfg, local_path)
                single_file = False
            else:
                if cfg.follow_symlinks:
                    filelist = _fswalk_follow_symlinks(local_path)
                else:
                    filelist = _fswalk_no_symlinks(local_path)
                single_file = False
        else:
            # Single file: synthesize a one-entry walk result for the loop.
            local_base = ""
            local_path = deunicodise(local_uri.dirname())
            filelist = [(local_path, [], [deunicodise(local_uri.basename())])]
            single_file = True
        for root, dirs, files in filelist:
            # Rebase the walk root onto the sync-relative base name.
            rel_root = root.replace(local_path, local_base, 1)
            for f in files:
                full_name = os.path.join(root, f)
                if not os.path.isfile(full_name):
                    continue
                if os.path.islink(full_name):
                    if not cfg.follow_symlinks:
                        continue
                relative_file = unicodise(os.path.join(rel_root, f))
                if os.path.sep != "/":
                    # Convert non-unix dir separators to '/'
                    relative_file = "/".join(relative_file.split(os.path.sep))
                if cfg.urlencoding_mode == "normal":
                    relative_file = replace_nonprintables(relative_file)
                if relative_file.startswith('./'):
                    relative_file = relative_file[2:]
                try:
                    sr = os.stat_result(os.stat(full_name))
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        # file was removed async to us getting the list
                        continue
                    else:
                        raise
                loc_list[relative_file] = {
                    'full_name_unicode': unicodise(full_name),
                    'full_name': full_name,
                    'size': sr.st_size,
                    'mtime': sr.st_mtime,
                    'dev': sr.st_dev,
                    'inode': sr.st_ino,
                    'uid': sr.st_uid,
                    'gid': sr.st_gid,
                    'sr': sr  # save it all, may need it in preserve_attrs_list
                    ## TODO: Possibly more to save here...
                }
                if 'md5' in cfg.sync_checks:
                    # Try the (dev, inode, mtime, size) cache first; only do
                    # file I/O for a checksum on a cache miss.
                    md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime,
                                    sr.st_size)
                    if md5 is None:
                        try:
                            md5 = loc_list.get_md5(
                                relative_file)  # this does the file I/O
                        except IOError:
                            continue
                        cache.add(sr.st_dev, sr.st_ino, sr.st_mtime,
                                  sr.st_size, md5)
                    loc_list.record_hardlink(relative_file, sr.st_dev,
                                             sr.st_ino, md5)
Пример #34
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            # basic
            domain=dict(required=False, default=None),
            servers=dict(required=False, type='list', default=None),
            realm=dict(required=False, default=None),
            hostname=dict(required=False, default=None),
            ntp_servers=dict(required=False, type='list', default=None),
            ntp_pool=dict(required=False, default=None),
            no_ntp=dict(required=False, type='bool', default=False),
            force_ntpd=dict(required=False, type='bool', default=False),
            nisdomain=dict(required=False, default=None),
            no_nisdomain=dict(required=False, type='bool', default='no'),
            kinit_attempts=dict(required=False, type='int'),
            ca_cert_files=dict(required=False, type='list', default=None),
            configure_firefox=dict(required=False, type='bool', default=False),
            firefox_dir=dict(required=False),
            ip_addresses=dict(required=False, type='list', default=None),
            all_ip_addresses=dict(required=False, type='bool', default=False),
            on_master=dict(required=False, type='bool', default=False),
            # sssd
            enable_dns_updates=dict(required=False, type='bool',
                                    default=False),
        ),
        supports_check_mode=True,
    )

    # module._ansible_debug = True
    setup_logging()

    options.domain_name = module.params.get('domain')
    options.servers = module.params.get('servers')
    options.realm_name = module.params.get('realm')
    options.host_name = module.params.get('hostname')
    options.ntp_servers = module.params.get('ntp_servers')
    options.ntp_pool = module.params.get('ntp_pool')
    options.no_ntp = module.params.get('no_ntp')
    options.force_ntpd = module.params.get('force_ntpd')
    options.nisdomain = module.params.get('nisdomain')
    options.no_nisdomain = module.params.get('no_nisdomain')
    options.kinit_attempts = module.params.get('kinit_attempts')
    options.ca_cert_files = module.params.get('ca_cert_files')
    options.configure_firefox = module.params.get('configure_firefox')
    options.firefox_dir = module.params.get('firefox_dir')
    options.ip_addresses = module.params.get('ip_addresses')
    options.all_ip_addresses = module.params.get('all_ip_addresses')
    options.on_master = module.params.get('on_master')
    options.enable_dns_updates = module.params.get('enable_dns_updates')

    # Get domain from first server if domain is not set, but if there are
    # servers
    if options.domain_name is None and options.servers is not None:
        if len(options.servers) > 0:
            options.domain_name = options.servers[0][options.servers[0].
                                                     find(".") + 1:]

    try:
        self = options

        # HostNameInstallInterface

        if options.ip_addresses is not None:
            for value in options.ip_addresses:
                try:
                    CheckedIPAddress(value)
                except Exception as e:
                    raise ValueError("invalid IP address {0}: {1}".format(
                        value, e))

        # ServiceInstallInterface

        if options.domain_name:
            validate_domain_name(options.domain_name)

        if options.realm_name:
            # pylint: disable=deprecated-method
            argspec = inspect.getargspec(validate_domain_name)
            if "entity" in argspec.args:
                # NUM_VERSION >= 40690:
                validate_domain_name(options.realm_name, entity="realm")

        # ClientInstallInterface

        if options.kinit_attempts < 1:
            raise ValueError("expects an integer greater than 0.")

        # ClientInstallInterface.__init__

        if self.servers and not self.domain_name:
            raise RuntimeError(
                "--server cannot be used without providing --domain")

        if self.force_ntpd:
            logger.warning("Option --force-ntpd has been deprecated")

        if self.ntp_servers and self.no_ntp:
            raise RuntimeError(
                "--ntp-server cannot be used together with --no-ntp")

        if self.ntp_pool and self.no_ntp:
            raise RuntimeError(
                "--ntp-pool cannot be used together with --no-ntp")

        if self.no_nisdomain and self.nisdomain:
            raise RuntimeError(
                "--no-nisdomain cannot be used together with --nisdomain")

        if self.ip_addresses:
            if self.enable_dns_updates:
                raise RuntimeError("--ip-address cannot be used together with"
                                   " --enable-dns-updates")

            if self.all_ip_addresses:
                raise RuntimeError("--ip-address cannot be used together with"
                                   "--all-ip-addresses")

        # SSSDInstallInterface

        self.no_sssd = False

        # ClientInstall

        if options.ca_cert_files is not None:
            for value in options.ca_cert_files:
                if not isinstance(value, list):
                    raise ValueError("Expected list, got {!r}".format(value))
                # this is what init() does
                value = value[-1]
                if not os.path.exists(value):
                    raise ValueError("'%s' does not exist" % value)
                if not os.path.isfile(value):
                    raise ValueError("'%s' is not a file" % value)
                if not os.path.isabs(value):
                    raise ValueError("'%s' is not an absolute file path" %
                                     value)

                try:
                    x509.load_certificate_from_file(value)
                except Exception:
                    raise ValueError("'%s' is not a valid certificate file" %
                                     value)

        # self.prompt_password = self.interactive

        self.no_ac = False

        # ClientInstall.__init__

        if self.firefox_dir and not self.configure_firefox:
            raise RuntimeError(
                "--firefox-dir cannot be used without --configure-firefox "
                "option")

    except (RuntimeError, ValueError) as e:
        module.fail_json(msg=str(e))

    # ipaclient.install.client.init

    # root_logger
    options.debug = False
    if options.domain_name:
        options.domain = normalize_hostname(installer.domain_name)
    else:
        options.domain = None
    options.server = options.servers
    options.realm = options.realm_name
    # installer.primary = installer.fixed_primary
    # if installer.principal:
    #     installer.password = installer.admin_password
    # else:
    #     installer.password = installer.host_password
    installer.hostname = installer.host_name
    options.conf_ntp = not options.no_ntp
    # installer.trust_sshfp = installer.ssh_trust_dns
    # installer.conf_ssh = not installer.no_ssh
    # installer.conf_sshd = not installer.no_sshd
    # installer.conf_sudo = not installer.no_sudo
    # installer.create_sshfp = not installer.no_dns_sshfp
    if installer.ca_cert_files:
        installer.ca_cert_file = installer.ca_cert_files[-1]
    else:
        installer.ca_cert_file = None
    # installer.location = installer.automount_location
    installer.dns_updates = installer.enable_dns_updates
    # installer.krb5_offline_passwords = \
    #     not installer.no_krb5_offline_passwords
    installer.sssd = not installer.no_sssd

    try:

        # client

        # global variables
        hostname = None
        hostname_source = None
        nosssd_files = {}
        dnsok = False
        cli_domain = None
        cli_server = None
        # subject_base = None
        cli_realm = None
        cli_kdc = None
        client_domain = None
        cli_basedn = None
        # end of global variables

        # client.install_check

        logger.info("This program will set up FreeIPA client.")
        logger.info("Version %s", version.VERSION)
        logger.info("")

        cli_domain_source = 'Unknown source'
        cli_server_source = 'Unknown source'

        # fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)

        if not os.getegid() == 0:
            raise ScriptError("You must be root to run ipa-client-install.",
                              rval=CLIENT_INSTALL_ERROR)

        tasks.check_selinux_status()

        # if is_ipa_client_installed(fstore, on_master=options.on_master):
        #     logger.error("IPA client is already configured on this system.")
        #     logger.info(
        #       "If you want to reinstall the IPA client, uninstall it first "
        #       "using 'ipa-client-install --uninstall'.")
        #     raise ScriptError(
        #         "IPA client is already configured on this system.",
        #         rval=CLIENT_ALREADY_CONFIGURED)

        if check_ldap_conf is not None:
            check_ldap_conf()

        if options.conf_ntp:
            try:
                timeconf.check_timedate_services()
            except timeconf.NTPConflictingService as e:
                logger.info(
                    "WARNING: conflicting time&date synchronization service "
                    "'%s' will be disabled in favor of chronyd",
                    e.conflicting_service)
                logger.info("")
            except timeconf.NTPConfigurationError:
                pass

        # password, principal and keytab are checked in tasks/install.yml
        # if options.unattended and (
        #     options.password is None and
        #     options.principal is None and
        #     options.keytab is None and
        #     options.prompt_password is False and
        #     not options.on_master
        # ):
        #     raise ScriptError(
        #         "One of password / principal / keytab is required.",
        #         rval=CLIENT_INSTALL_ERROR)

        if options.hostname:
            hostname = options.hostname
            hostname_source = 'Provided as option'
        else:
            hostname = socket.getfqdn()
            hostname_source = "Machine's FQDN"
        if hostname != hostname.lower():
            raise ScriptError(
                "Invalid hostname '{}', must be lower-case.".format(hostname),
                rval=CLIENT_INSTALL_ERROR)

        if hostname in ('localhost', 'localhost.localdomain'):
            raise ScriptError(
                "Invalid hostname, '{}' must not be used.".format(hostname),
                rval=CLIENT_INSTALL_ERROR)

        if hasattr(constants, "MAXHOSTNAMELEN"):
            try:
                validate_hostname(hostname, maxlen=constants.MAXHOSTNAMELEN)
            except ValueError as e:
                raise ScriptError('invalid hostname: {}'.format(e),
                                  rval=CLIENT_INSTALL_ERROR)

        if hasattr(tasks, "is_nosssd_supported"):
            # --no-sssd is not supported any more for rhel-based distros
            if not tasks.is_nosssd_supported() and not options.sssd:
                raise ScriptError(
                    "Option '--no-sssd' is incompatible with the 'authselect' "
                    "tool provided by this distribution for configuring "
                    "system authentication resources",
                    rval=CLIENT_INSTALL_ERROR)

            # --noac is not supported any more for rhel-based distros
            if not tasks.is_nosssd_supported() and options.no_ac:
                raise ScriptError(
                    "Option '--noac' is incompatible with the 'authselect' "
                    "tool provided by this distribution for configuring "
                    "system authentication resources",
                    rval=CLIENT_INSTALL_ERROR)

        # when installing with '--no-sssd' option, check whether nss-ldap is
        # installed
        if not options.sssd:
            if not os.path.exists(paths.PAM_KRB5_SO):
                raise ScriptError("The pam_krb5 package must be installed",
                                  rval=CLIENT_INSTALL_ERROR)

            (nssldap_installed, nosssd_files) = nssldap_exists()
            if not nssldap_installed:
                raise ScriptError(
                    "One of these packages must be installed: nss_ldap or "
                    "nss-pam-ldapd",
                    rval=CLIENT_INSTALL_ERROR)

            # principal and keytab are checked in tasks/install.yml
            # if options.keytab and options.principal:
            #   raise ScriptError(
            #     "Options 'principal' and 'keytab' cannot be used together.",
            #     rval=CLIENT_INSTALL_ERROR)

            # keytab and force_join are checked in tasks/install.yml
            # if options.keytab and options.force_join:
            #   logger.warning("Option 'force-join' has no additional effect "
            #                  "when used with together with option 'keytab'.")

        # Added with freeipa-4.7.1 >>>
        # Remove invalid keytab file
        try:
            gssapi.Credentials(
                store={'keytab': paths.KRB5_KEYTAB},
                usage='accept',
            )
        except gssapi.exceptions.GSSError:
            logger.debug("Deleting invalid keytab: '%s'.", paths.KRB5_KEYTAB)
            remove_file(paths.KRB5_KEYTAB)
        # Added with freeipa-4.7.1 <<<

        # Check if old certificate exist and show warning
        if (not options.ca_cert_file
                and get_cert_path(options.ca_cert_file) == paths.IPA_CA_CRT):
            logger.warning("Using existing certificate '%s'.",
                           paths.IPA_CA_CRT)

        if not check_ip_addresses(options):
            raise ScriptError(
                "Failed to check ip addresses, check installation log",
                rval=CLIENT_INSTALL_ERROR)

        # Create the discovery instance
        # pylint: disable=invalid-name
        ds = ipadiscovery.IPADiscovery()

        ret = ds.search(domain=options.domain,
                        servers=options.server,
                        realm=options.realm_name,
                        hostname=hostname,
                        ca_cert_path=get_cert_path(options.ca_cert_file))

        if options.server and ret != 0:
            # There is no point to continue with installation as server list
            # was passed as a fixed list of server and thus we cannot discover
            # any better result
            logger.error("Failed to verify that %s is an IPA Server.",
                         ', '.join(options.server))
            logger.error(
                "This may mean that the remote server is not up "
                "or is not reachable due to network or firewall settings.")
            print_port_conf_info()
            raise ScriptError("Failed to verify that %s is an IPA Server." %
                              ', '.join(options.server),
                              rval=CLIENT_INSTALL_ERROR)

        if ret == ipadiscovery.BAD_HOST_CONFIG:
            logger.error("Can't get the fully qualified name of this host")
            logger.info("Check that the client is properly configured")
            raise ScriptError(
                "Can't get the fully qualified name of this host",
                rval=CLIENT_INSTALL_ERROR)
        if ret == ipadiscovery.NOT_FQDN:
            raise ScriptError(
                "{} is not a fully-qualified hostname".format(hostname),
                rval=CLIENT_INSTALL_ERROR)
        if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
                or not ds.domain:
            if ret == ipadiscovery.NO_LDAP_SERVER:
                if ds.server:
                    logger.debug("%s is not an LDAP server", ds.server)
                else:
                    logger.debug("No LDAP server found")
            elif ret == ipadiscovery.NOT_IPA_SERVER:
                if ds.server:
                    logger.debug("%s is not an IPA server", ds.server)
                else:
                    logger.debug("No IPA server found")
            else:
                logger.debug("Domain not found")
            if options.domain:
                cli_domain = options.domain
                cli_domain_source = 'Provided as option'
            elif options.unattended:
                raise ScriptError(
                    "Unable to discover domain, not provided on command line",
                    rval=CLIENT_INSTALL_ERROR)
            else:
                raise ScriptError("No interactive installation")
            #    logger.info(
            #        "DNS discovery failed to determine your DNS domain")
            #    cli_domain = user_input(
            #        "Provide the domain name of your IPA server "
            #        "(ex: example.com)",
            #        allow_empty=False)
            #    cli_domain_source = 'Provided interactively'
            #    logger.debug(
            #        "will use interactively provided domain: %s", cli_domain)
            ret = ds.search(domain=cli_domain,
                            servers=options.server,
                            hostname=hostname,
                            ca_cert_path=get_cert_path(options.ca_cert_file))

        if not cli_domain:
            if ds.domain:
                cli_domain = ds.domain
                cli_domain_source = ds.domain_source
                logger.debug("will use discovered domain: %s", cli_domain)

        client_domain = hostname[hostname.find(".") + 1:]

        if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
                or not ds.server:
            logger.debug("IPA Server not found")
            if options.server:
                cli_server = options.server
                cli_server_source = 'Provided as option'
            elif options.unattended:
                raise ScriptError("Unable to find IPA Server to join",
                                  rval=CLIENT_INSTALL_ERROR)
            else:
                raise ScriptError("No interactive installation")
            #    logger.debug("DNS discovery failed to find the IPA Server")
            #    cli_server = [
            #        user_input(
            #            "Provide your IPA server name (ex: ipa.example.com)",
            #            allow_empty=False)
            #    ]
            #    cli_server_source = 'Provided interactively'
            #    logger.debug(
            #      "will use interactively provided server: %s", cli_server[0])
            ret = ds.search(domain=cli_domain,
                            servers=cli_server,
                            hostname=hostname,
                            ca_cert_path=get_cert_path(options.ca_cert_file))

        else:
            # Only set dnsok to True if we were not passed in one or more
            # servers and if DNS discovery actually worked.
            if not options.server:
                (server, domain) = ds.check_domain(ds.domain, set(),
                                                   "Validating DNS Discovery")
                if server and domain:
                    logger.debug("DNS validated, enabling discovery")
                    dnsok = True
                else:
                    logger.debug("DNS discovery failed, disabling discovery")
            else:
                logger.debug(
                    "Using servers from command line, disabling DNS discovery")

        if not cli_server:
            if options.server:
                cli_server = ds.servers
                cli_server_source = 'Provided as option'
                logger.debug("will use provided server: %s",
                             ', '.join(options.server))
            elif ds.server:
                cli_server = ds.servers
                cli_server_source = ds.server_source
                logger.debug("will use discovered server: %s", cli_server[0])

        if ret == ipadiscovery.NOT_IPA_SERVER:
            logger.error("%s is not an IPA v2 Server.", cli_server[0])
            print_port_conf_info()
            logger.debug("(%s: %s)", cli_server[0], cli_server_source)
            raise ScriptError("%s is not an IPA v2 Server." % cli_server[0],
                              rval=CLIENT_INSTALL_ERROR)

        if ret == ipadiscovery.NO_ACCESS_TO_LDAP:
            logger.warning("Anonymous access to the LDAP server is disabled.")
            logger.info("Proceeding without strict verification.")
            logger.info("Note: This is not an error if anonymous access "
                        "has been explicitly restricted.")
            ret = 0

        if ret == ipadiscovery.NO_TLS_LDAP:
            logger.warning(
                "The LDAP server requires TLS is but we do not have the CA.")
            logger.info("Proceeding without strict verification.")
            ret = 0

        if ret != 0:
            logger.error("Failed to verify that %s is an IPA Server.",
                         cli_server[0])
            logger.error(
                "This may mean that the remote server is not up "
                "or is not reachable due to network or firewall settings.")
            print_port_conf_info()
            logger.debug("(%s: %s)", cli_server[0], cli_server_source)
            raise ScriptError("Failed to verify that %s is an IPA Server." %
                              cli_server[0],
                              rval=CLIENT_INSTALL_ERROR)

        cli_kdc = ds.kdc
        if dnsok and not cli_kdc:
            logger.error(
                "DNS domain '%s' is not configured for automatic "
                "KDC address lookup.", ds.realm.lower())
            logger.debug("(%s: %s)", ds.realm, ds.realm_source)
            logger.error("KDC address will be set to fixed value.")

        if dnsok:
            logger.info("Discovery was successful!")
        elif not options.unattended:
            raise ScriptError("No interactive installation")
        # if not options.server:
        #     logger.warning(
        #       "The failure to use DNS to find your IPA "
        #       "server indicates that your resolv.conf file is not properly "
        #       "configured.")
        # logger.info(
        #     "Autodiscovery of servers for failover cannot work "
        #     "with this configuration.")
        # logger.info(
        #   "If you proceed with the installation, services "
        #   "will be configured to always access the discovered server for "
        #   "all operations and will not fail over to other servers in case "
        #   "of failure.")
        # if not user_input(
        #     "Proceed with fixed values and no DNS discovery?", False):
        #     raise ScriptError(rval=CLIENT_INSTALL_ERROR)

        # Do not ask for time source
        # if options.conf_ntp:
        #     if not options.on_master and not options.unattended and not (
        #             options.ntp_servers or options.ntp_pool):
        #         options.ntp_servers, options.ntp_pool = \
        #             timeconf.get_time_source()

        cli_realm = ds.realm
        cli_realm_source = ds.realm_source
        logger.debug("will use discovered realm: %s", cli_realm)

        if options.realm_name and options.realm_name != cli_realm:
            logger.error(
                "The provided realm name [%s] does not match discovered "
                "one [%s]", options.realm_name, cli_realm)
            logger.debug("(%s: %s)", cli_realm, cli_realm_source)
            raise ScriptError(
                "The provided realm name [%s] does not match discovered "
                "one [%s]" % (options.realm_name, cli_realm),
                rval=CLIENT_INSTALL_ERROR)

        cli_basedn = ds.basedn
        cli_basedn_source = ds.basedn_source
        logger.debug("will use discovered basedn: %s", cli_basedn)
        # subject_base = DN(('O', cli_realm))

        logger.info("Client hostname: %s", hostname)
        logger.debug("Hostname source: %s", hostname_source)
        logger.info("Realm: %s", cli_realm)
        logger.debug("Realm source: %s", cli_realm_source)
        logger.info("DNS Domain: %s", cli_domain)
        logger.debug("DNS Domain source: %s", cli_domain_source)
        logger.info("IPA Server: %s", ', '.join(cli_server))
        logger.debug("IPA Server source: %s", cli_server_source)
        logger.info("BaseDN: %s", cli_basedn)
        logger.debug("BaseDN source: %s", cli_basedn_source)

        if not options.on_master:
            if options.ntp_servers:
                for server in options.ntp_servers:
                    logger.info("NTP server: %s", server)

            if options.ntp_pool:
                logger.info("NTP pool: %s", options.ntp_pool)

        # ipa-join would fail with IP address instead of a FQDN
        for srv in cli_server:
            try:
                socket.inet_pton(socket.AF_INET, srv)
                is_ipaddr = True
            except socket.error:
                try:
                    socket.inet_pton(socket.AF_INET6, srv)
                    is_ipaddr = True
                except socket.error:
                    is_ipaddr = False

            if is_ipaddr:
                logger.info()
                logger.warning(
                    "It seems that you are using an IP address "
                    "instead of FQDN as an argument to --server. The "
                    "installation may fail.")
                break

        # logger.info()
        # if not options.unattended and not user_input(
        #     "Continue to configure the system with these values?", False):
        #     raise ScriptError(rval=CLIENT_INSTALL_ERROR)

    except ScriptError as e:
        module.fail_json(msg=str(e))

    #########################################################################

    # client._install

    # May not happen in here at this time
    # if not options.on_master:
    #     # Try removing old principals from the keytab
    #     purge_host_keytab(cli_realm)

    # Check if ipa client is already configured
    if is_client_configured():
        client_already_configured = True

        # Check that realm and domain match
        current_config = get_ipa_conf()
        if cli_domain != current_config.get('domain'):
            module.fail_json(msg="IPA client already installed "
                             "with a conflicting domain")
        if cli_realm != current_config.get('realm'):
            module.fail_json(msg="IPA client already installed "
                             "with a conflicting realm")
    else:
        client_already_configured = False

    # Done
    module.exit_json(changed=False,
                     servers=cli_server,
                     domain=cli_domain,
                     realm=cli_realm,
                     kdc=cli_kdc,
                     basedn=str(cli_basedn),
                     hostname=hostname,
                     client_domain=client_domain,
                     dnsok=dnsok,
                     sssd=options.sssd,
                     ntp_servers=options.ntp_servers,
                     ntp_pool=options.ntp_pool,
                     client_already_configured=client_already_configured,
                     ipa_python_version=IPA_PYTHON_VERSION,
                     nosssd_files=nosssd_files)
Пример #35
0
    print('------')
    game = discord.Game(
        "No worries, im here for you, type \"b.help\" so i can help!")
    await bot.change_presence(status=discord.Status.online, activity=game)


##Check_if_folder_exists_start
# Ensure the blacklist/summonerlist data folders exist, then lock the user
# folder down to the current effective user.
if not os.path.isdir(blacklist_folder) or not os.path.isdir(
        summonerlist_folder):
    print("if party start")
    if not os.path.isdir(blacklist_folder):
        os.makedirs(blacklist_folder)
    if not os.path.isdir(summonerlist_folder):
        os.makedirs(summonerlist_folder)
    # BUG FIX: os.walk() only iterates a directory tree -- it never executes
    # shell commands, so the chmod/chown below silently did nothing. Use
    # os.system(), add the missing space after the mode, and give chown the
    # directory it is supposed to operate on.
    os.system("chmod 700 " + user_folder + " -R && chown " +
              str(os.geteuid()) + ":" + str(os.getegid()) + " " +
              user_folder + " -R")
#Check_if_folder_exists_end

#############################################################################
#								Definitions								    #
#############################################################################


def get_summoner(summoner_input):
    """Fetch the Cassiopeia summoner whose in-game name matches the input."""
    return cass.get_summoner(name=summoner_input)


def getelo(ctx, arg1, region):  #arg2=region
    print(region)  ##DEBUGG
Пример #36
0
    
    
    # json.dumps(args.logfile,indent=6, sort_keys = True)
    # return 0

if __name__ == "__main__":
    
    if args.json:
        print "json Argument received"
        jsonDump(args.logfile)

    retCode = daemonization()
    procParams = """
    return code = %s
    process ID = %s
    parent process ID = %s
    process group ID = %s
    session ID = %s
    user ID = %s
    effective user ID = %s
    real group ID = %s
    effective group ID = %s
    """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0),os.getuid(),os.geteuid(),os.getgid(),os.getegid())
    daemonLog = open(args.logfile,"w").write(procParams + "\n")
   
    


    
    daemonization()
Пример #37
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: chanafanghua
# @Date:   2018-01-05 20:38:46
# @Last Modified by:   chanafanghua
# @Last Modified time: 2018-01-05 20:58:39

# Demo: print this process's pid/ppid and its real/effective user and group
# ids, shelling out to ps/grep for extra context. Note that os.system()
# returns the child's exit status, NOT its output, so `bash`, `msg1` and
# `msg2` below hold status codes; the visible command output is written
# straight to stdout by the child processes themselves.

import sys
import os
print("进程运行的pid:", os.getpid())  # pid of this process
print("进程父进程的pid", os.getppid())  # pid of the parent process
bash = os.system("ps aux | grep {}".format(os.getppid()))  # exit status only
print("父进程在ubuntu中的信息", bash)
print("调用进程的实际用户uid", os.getuid())  # real uid
msg1 = os.system("cat /etc/passwd | grep {}".format(os.getuid()))
print(msg1)
print("调用进程的有效用户euid", os.geteuid())  # effective uid
msg2 = os.system("cat /etc/passwd | grep {}".format(os.geteuid()))
print(msg2)
print("调用进程的实际用户组gid", os.getgid())  # real gid
print("调用进程的有效组egid", os.getegid())  # effective gid
Пример #38
0
sys.exit(ret)
'''

additional_code = ''

# Optionally replicate the host's /etc/hosts entries inside the container so
# tests resolve the same names as the host does.
if args.copy_etc_hosts:
    additional_code = '''
with open('/etc/hosts', 'a') as f:
    f.write("""
    {etc_hosts_content}
""")
'''.format(etc_hosts_content=get_local_etc_hosts_entries())

# Fill in the docker invocation template. The invoking user's effective
# uid/gid are substituted in -- presumably used together with
# `shed_privileges` to drop root inside the container; confirm against the
# template defined above.
command = command.format(args=pass_args,
                         uid=os.geteuid(),
                         gid=os.getegid(),
                         test_dir=args.test_dir,
                         shed_privileges=(platform.system() == 'Linux'),
                         report_path=args.report_path,
                         test_type=args.test_type,
                         additional_code=additional_code,
                         env_file=["--env-file={}".format(args.env_file)]
                         if args.env_file else "[]")

# 128MB or more required for chrome tests to run with xvfb
run_params = ['--shm-size=128m']

ret = docker.run(tty=True,
                 rm=True,
                 interactive=True,
                 workdir=script_dir,
Пример #39
0
def main():

    global DEBUG_MODE
    global ROOT_PATH

    euid = os.geteuid()
    egid = os.getegid()

    parser = OptionParser(usage='usage: %prog [options]')
    parser.add_option(
        '-i',
        '--id',
        dest='incident_prefix',
        default='osxcollect',
        help=
        '[OPTIONAL] An identifier which will be added as a prefix to the output file name.'
    )
    parser.add_option(
        '-o',
        '--outputfile',
        dest='output_file_name',
        default=None,
        help=
        '[OPTIONAL] Name of the output file. Default name uses the timestamp. Try \'/dev/stdout\' for fun!'
    )
    parser.add_option(
        '-p',
        '--path',
        dest='rootpath',
        default='/',
        help=
        '[OPTIONAL] Path to the OS X system to audit (e.g. /mnt/xxx). The running system will be audited if not specified.'
    )
    parser.add_option(
        '-s',
        '--section',
        dest='section_list',
        default=[],
        action='append',
        help=
        '[OPTIONAL] Just run the named section.  May be specified more than once.'
    )
    parser.add_option(
        '-d',
        '--debug',
        action='store_true',
        default=False,
        help='[OPTIONAL] Enable verbose output and python breakpoints.')
    options, _ = parser.parse_args()

    DEBUG_MODE = options.debug
    ROOT_PATH = options.rootpath

    if ROOT_PATH == '/' and (euid != 0 and egid != 0):
        Logger.log_error('Must run as root!\n')
        return

    # Create an incident ID
    prefix = options.incident_prefix
    incident_id = '{0}-{1}'.format(
        prefix,
        datetime.now().strftime('%Y_%m_%d-%H_%M_%S'))

    # Make a directory named for the output
    output_directory = './{0}'.format(incident_id)
    os.makedirs(output_directory)

    # Create an output file name
    output_file_name = options.output_file_name or pathjoin(
        output_directory, '{0}.json'.format(incident_id))

    # Collect information from plists and sqlite dbs and such
    with open(output_file_name, 'w') as output_file:
        Logger.set_output_file(output_file)
        with Logger.Extra('osxcollector_incident_id', incident_id):
            Collector().collect(section_list=options.section_list)

        # Archive log files
        log_file_archiver = LogFileArchiver()
        log_file_archiver.archive_logs(output_directory)
        log_file_archiver.compress_directory(incident_id, '.',
                                             output_directory)

        if not DEBUG_MODE:
            try:
                shutil.rmtree(output_directory)
            except Exception as e:
                Logger.log_exception(e)

    # Output message to the user
    sys.stderr.write('Wrote {0} lines.\nOutput in {1}.tar.gz\n'.format(
        Logger.lines_written, incident_id))
Пример #40
0
 def get_current_group(self, args):
     """Return the name of this process's current effective group."""
     return grp.getgrgid(os.getegid()).gr_name
Пример #41
0
def _node_up(image, bindir, dns_servers, config, db_node_mappings, logdir,
             configurator, storages_dockers):
    """Start one application node inside a docker container.

    Renders the node's start-up shell command from *config*, launches the
    container and returns it together with a summary dict (docker ids and
    node names).

    :param image: docker image to run.
    :param bindir: host path of the built application (mounted read-only).
    :param dns_servers: DNS servers to configure inside the container.
    :param config: node configuration (vm.args / sys.config, optional
        os_config listing users/groups to create in the container).
    :param db_node_mappings: maps logical db node names to concrete ones.
    :param logdir: host directory for node logs, or falsy to keep logs
        inside the container.
    :param configurator: app-specific hooks (app name, extra volumes,
        pre-start commands, nodes-list attribute name).
    :param storages_dockers: storage containers used to build NFS mounts.
    """
    app_name = configurator.app_name()
    node_name = config['nodes']['node']['vm.args']['name']
    db_nodes = config['nodes']['node']['sys.config'][app_name]['db_nodes']

    # Replace logical db node names with their mapped (actual) names.
    for i in range(len(db_nodes)):
        db_nodes[i] = db_node_mappings[db_nodes[i]]

    # node_name has the form name@host.domain; extract hostname and domain.
    (name, sep, hostname) = node_name.partition('@')
    (_, _, domain) = hostname.partition('.')

    bindir = os.path.abspath(bindir)

    # bindfs remaps ownership of the log dir to the invoking user's uid/gid
    # so files written as root in the container stay readable on the host.
    command = '''set -e
mkdir -p /root/bin/node/log/
bindfs --create-for-user={uid} --create-for-group={gid} /root/bin/node/log /root/bin/node/log
cat <<"EOF" > /tmp/gen_dev_args.json
{gen_dev_args}
EOF
{mount_commands}
{pre_start_commands}
ln -s {bindir} /root/build
/root/bin/node/bin/{executable} console'''

    mount_commands = common.mount_nfs_command(config, storages_dockers)
    pre_start_commands = configurator.pre_start_commands(domain)
    command = command.format(bindir=bindir,
                             gen_dev_args=json.dumps(
                                 {configurator.app_name(): config}),
                             mount_commands=mount_commands,
                             pre_start_commands=pre_start_commands,
                             uid=os.geteuid(),
                             gid=os.getegid(),
                             executable=configurator.app_name())

    volumes = ['/root/bin', (bindir, bindir, 'ro')]
    volumes += configurator.extra_volumes(config, bindir, domain,
                                          storages_dockers)

    # When a host log directory is requested, mount it over the node's log dir.
    if logdir:
        logdir = os.path.join(os.path.abspath(logdir), hostname)
        os.makedirs(logdir)
        volumes.extend([(logdir, '/root/bin/node/log', 'rw')])

    container = docker.run(image=image,
                           name=hostname,
                           hostname=hostname,
                           detach=True,
                           interactive=True,
                           tty=True,
                           workdir=bindir,
                           volumes=volumes,
                           dns_list=dns_servers,
                           privileged=True,
                           command=command)

    # create system users and groups (if specified)
    if 'os_config' in config:
        common.create_users(container, config['os_config']['users'])
        common.create_groups(container, config['os_config']['groups'])

    return container, {
        'docker_ids': [container],
        configurator.nodes_list_attribute(): [node_name]
    }
Пример #42
0
    def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
        """
        Fork and then exec sub-process.

        @param path: the path where to run the new process.
        @type path: C{str}
        @param uid: if defined, the uid used to run the new process.
        @type uid: C{int}
        @param gid: if defined, the gid used to run the new process.
        @type gid: C{int}
        @param executable: the executable to run in a new process.
        @type executable: C{str}
        @param args: arguments used to create the new process.
        @type args: C{list}.
        @param environment: environment used for the new process.
        @type environment: C{dict}.
        @param kwargs: keyword arguments to L{_setupChild} method.
        """
        settingUID = (uid is not None) or (gid is not None)
        if settingUID:
            # Save the current real and effective ids so the parent can
            # restore them after the fork.
            curegid = os.getegid()
            currgid = os.getgid()
            cureuid = os.geteuid()
            curruid = os.getuid()
            if uid is None:
                uid = cureuid
            if gid is None:
                gid = curegid
            # prepare to change UID in subprocess
            # NOTE(review): becoming uid/gid 0 here assumes the process is
            # able to (e.g. a setuid-root binary) -- confirm deployment.
            os.setuid(0)
            os.setgid(0)

        # Fork with the garbage collector disabled; the parent re-enables it
        # below, and the child execs or _exits so it never needs it.
        collectorEnabled = gc.isenabled()
        gc.disable()
        try:
            self.pid = os.fork()
        except:
            # Still in the parent process
            if settingUID:
                # Restore the real/effective ids saved above.
                os.setregid(currgid, curegid)
                os.setreuid(curruid, cureuid)
            if collectorEnabled:
                gc.enable()
            raise
        else:
            if self.pid == 0:  # pid is 0 in the child process
                # do not put *ANY* code outside the try block. The child process
                # must either exec or _exit. If it gets outside this block (due
                # to an exception that is not handled here, but which might be
                # handled higher up), there will be two copies of the parent
                # running in parallel, doing all kinds of damage.

                # After each change to this code, review it to make sure there
                # are no exit paths.
                try:
                    # Stop debugging. If I am, I don't care anymore.
                    sys.settrace(None)
                    self._setupChild(**kwargs)
                    self._execChild(path, settingUID, uid, gid, executable,
                                    args, environment)
                except:
                    # If there are errors, bail and try to write something
                    # descriptive to stderr.
                    # XXX: The parent's stderr isn't necessarily fd 2 anymore, or
                    #      even still available
                    # XXXX: however even libc assumes write(2, err) is a useful
                    #       thing to attempt
                    try:
                        stderr = os.fdopen(2, 'w')
                        stderr.write(
                            "Upon execvpe %s %s in environment %s\n:" %
                            (executable, str(args), "id %s" % id(environment)))
                        traceback.print_exc(file=stderr)
                        stderr.flush()
                        for fd in range(3):
                            os.close(fd)
                    except:
                        pass  # make *sure* the child terminates
                # Did you read the comment about not adding code here?
                os._exit(1)

        # we are now in parent process
        if settingUID:
            # Restore the real/effective ids saved before the fork.
            os.setregid(currgid, curegid)
            os.setreuid(curruid, cureuid)
        if collectorEnabled:
            gc.enable()
        self.status = -1  # this records the exit status of the child
Пример #43
0
    def test_simple_query(self):
        """
        Test querying a pool created on a single server.

        Creates a pool owned by the current effective uid/gid, connects to
        it, queries it and verifies uuid, target counts, mode, uid and gid.

        :avocado: tags=pool,poolquery,infotest
        """
        # create pool
        # NOTE: 0731 is a Python 2 octal literal -- POSIX-style pool mode bits.
        mode = self.params.get("mode", '/run/testparams/modes/*', 0731)
        if mode == 73:
            self.cancel('Cancel the mode test 73 because of DAOS-1877')

        # The pool is owned by this test process's effective uid/gid.
        uid = os.geteuid()
        gid = os.getegid()
        size = self.params.get("size", '/run/testparams/sizes/*', 0)
        group = self.server_group

        self.pool.create(mode, uid, gid, size, group, None)

        # connect to the pool
        flags = self.params.get("perms", '/run/testparams/connectperms/*', '')
        connect_flags = 1 << flags
        self.pool.connect(connect_flags)

        # query the pool
        pool_info = self.pool.pool_query()

        # check uuid
        uuid_str = c_uuid_to_str(pool_info.pi_uuid)
        if uuid_str != self.pool.get_uuid_str():
            self.d_log.error("UUID str does not match expected string")
            self.fail("UUID str does not match expected string")
        '''
        # validate size of pool is what we expect
        This check is currently disabled, as space is not implemented in
        DAOS C API yet.
        if size != pool_info.pi_space:
            self.d_log.error("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
            self.fail("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
        '''

        # number of targets
        if pool_info.pi_ntargets != len(self.hostlist):
            self.d_log.error("found number of targets in pool did not match "
                             "expected number, 1. num targets: {0}".format(
                                 pool_info.pi_ntargets))
            self.fail("found number of targets in pool did not match "
                      "expected number, 1. num targets: {0}".format(
                          pool_info.pi_ntargets))

        # number of disabled targets
        if pool_info.pi_ndisabled > 0:
            self.d_log.error("found disabled targets, none expected to be")
            self.fail("found disabled targets, none expected to be disabled")

        # mode
        if pool_info.pi_mode != mode:
            self.d_log.error(
                "found different mode than expected. expected {0}, "
                "found {1}.".format(mode, pool_info.pi_mode))
            self.fail("found different mode than expected. expected {0}, "
                      "found {1}.".format(mode, pool_info.pi_mode))

        # uid
        if pool_info.pi_uid != uid:
            self.d_log.error(
                "found actual pool uid {0} does not match expected "
                "uid {1}".format(pool_info.pi_uid, uid))
            self.fail("found actual pool uid {0} does not match expected uid "
                      "{1}".format(pool_info.pi_uid, uid))

        # gid
        if pool_info.pi_gid != gid:
            self.d_log.error(
                "found actual pool gid {0} does not match expected "
                "gid {1}".format(pool_info.pi_gid, gid))
            self.fail("found actual pool gid {0} does not match expected gid "
                      "{1}".format(pool_info.pi_gid, gid))
Пример #44
0
print(sys.platform)
#returns darwin
# Print out the version of Python you're using:
# YOUR CODE HERE

# A string containing the version number of the Python interpreter plus additional information on the build number and compiler used
print(sys.version)
# returns 3.8.3 (default, May 27 2020, 20:54:22)
# [Clang 11.0.3 (clang-1103.0.32.59)]

# A tuple containing the five components of the version number:
print(sys.version_info
      )  # major=3, minor=8, micro=3, releaselevel='final', serial=0

import os
# See the docs for the OS module: https://docs.python.org/3.7/library/os.html

# Print the current process ID
# YOUR CODE HERE
# BUG FIX: os.getegid() returns the effective *group* id; the exercise asks
# for the process id, which is os.getpid().
print(os.getpid())

# Print the current working directory (cwd):
# YOUR CODE HERE
print(os.getcwd())
# returns /Users/carlitosredding/Documents/GitHub/Python/Lambda-Python/Intro-Python/Python-Intro/src

# Print out your machine's login name
# YOUR CODE HERE
print(os.getlogin())
# returns carlitosredding
Пример #45
0
    def _get_metadata_proxy_user_group(cls, conf):
        user = conf.metadata_proxy_user or str(os.geteuid())
        group = conf.metadata_proxy_group or str(os.getegid())

        return user, group
Пример #46
0
#!/usr/bin/env python

import os
import sys

# BUG FIX: "run as root" means effective *user* id 0; os.getegid() tests the
# effective group id, which would let any member of group 0 past the check
# while blocking a root user whose egid is non-zero.
if os.geteuid() != 0:
    sys.exit('Script must be run as root')

from time import sleep
from pyA20.gpio import gpio
from pyA20.gpio import port


def pinName(name):
    if name == "PA10":
        return port.PA10
    elif name == "PA20":
        return port.PA20
    elif name == "PD14":
        return port.PD14
    elif name == "PC4":
        return port.PC4
    elif name == "PC7":
        return port.PC7
    elif name == "PA7":
        return port.PA7
    elif name == "PA8":
        return port.PA8
    else:
        return 0
Пример #47
0
class TestUnit(unittest.TestCase):

    current_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir))
    pardir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
    is_su = os.geteuid() == 0
    uid = os.geteuid()
    gid = os.getegid()
    architecture = platform.architecture()[0]
    system = platform.system()
    maxDiff = None

    detailed = False
    save_log = False
    print_log = False
    unsafe = False

    def __init__(self, methodName='runTest'):
        super().__init__(methodName)

        if re.match(r'.*\/run\.py$', sys.argv[0]):
            args, rest = TestUnit._parse_args()

            TestUnit._set_args(args)

    def run(self, result=None):
        if not hasattr(self, 'application_type'):
            return super().run(result)

        # rerun test for each available module version

        type = self.application_type
        for module in self.prerequisites['modules']:
            if module in self.available['modules']:
                for version in self.available['modules'][module]:
                    self.application_type = type + ' ' + version
                    super().run(result)

    @classmethod
    def main(cls):
        args, rest = TestUnit._parse_args()

        for i, arg in enumerate(rest):
            if arg[:5] == 'test_':
                rest[i] = cls.__name__ + '.' + arg

        sys.argv = sys.argv[:1] + rest

        TestUnit._set_args(args)

        unittest.main()

    @classmethod
    def setUpClass(cls, complete_check=True):
        cls.available = {'modules': {}, 'features': {}}
        unit = TestUnit()

        unit._run()

        # read unit.log

        for i in range(50):
            with open(unit.testdir + '/unit.log', 'r') as f:
                log = f.read()
                m = re.search('controller started', log)

                if m is None:
                    time.sleep(0.1)
                else:
                    break

        if m is None:
            unit.stop()
            exit("Unit is writing log too long")

        # discover available modules from unit.log

        for module in re.findall(r'module: ([a-zA-Z]+) (.*) ".*"$', log, re.M):
            if module[0] not in cls.available['modules']:
                cls.available['modules'][module[0]] = [module[1]]
            else:
                cls.available['modules'][module[0]].append(module[1])

        def check(available, prerequisites):
            missed = []

            # check modules

            if 'modules' in prerequisites:
                available_modules = list(available['modules'].keys())

                for module in prerequisites['modules']:
                    if module in available_modules:
                        continue

                    missed.append(module)

            if missed:
                print('Unit has no ' + ', '.join(missed) + ' module(s)')
                raise unittest.SkipTest()

            # check features

            if 'features' in prerequisites:
                available_features = list(available['features'].keys())

                for feature in prerequisites['features']:
                    if feature in available_features:
                        continue

                    missed.append(feature)

            if missed:
                print(', '.join(missed) + ' feature(s) not supported')
                raise unittest.SkipTest()

        def destroy():
            unit.stop()
            unit._check_alerts(log)
            shutil.rmtree(unit.testdir)

        def complete():
            destroy()
            check(cls.available, cls.prerequisites)

        if complete_check:
            complete()
        else:
            unit.complete = complete
            return unit

    def setUp(self):
        self._run()

    def _run(self):
        build_dir = self.pardir + '/build'
        self.unitd = build_dir + '/unitd'

        if not os.path.isfile(self.unitd):
            exit("Could not find unit")

        self.testdir = tempfile.mkdtemp(prefix='unit-test-')

        self.public_dir(self.testdir)

        if oct(stat.S_IMODE(os.stat(build_dir).st_mode)) != '0o777':
            self.public_dir(build_dir)

        os.mkdir(self.testdir + '/state')

        with open(self.testdir + '/unit.log', 'w') as log:
            self._p = subprocess.Popen(
                [
                    self.unitd,
                    '--no-daemon',
                    '--modules',
                    self.pardir + '/build',
                    '--state',
                    self.testdir + '/state',
                    '--pid',
                    self.testdir + '/unit.pid',
                    '--log',
                    self.testdir + '/unit.log',
                    '--control',
                    'unix:' + self.testdir + '/control.unit.sock',
                    '--tmp',
                    self.testdir,
                ],
                stderr=log,
            )

        atexit.register(self.stop)

        # Due to race between connect() and listen() after the socket binding
        # tests waits for unit.pid file which is created after listen().

        if not self.waitforfiles(self.testdir + '/unit.pid'):
            exit("Could not start unit")

        self.skip_alerts = [
            r'read signalfd\(4\) failed',
            r'sendmsg.+failed',
            r'recvmsg.+failed',
        ]
        self.skip_sanitizer = False

    def tearDown(self):
        self.stop()

        # detect errors and failures for current test

        def list2reason(exc_list):
            if exc_list and exc_list[-1][0] is self:
                return exc_list[-1][1]

        if hasattr(self, '_outcome'):
            result = self.defaultTestResult()
            self._feedErrorsToResult(result, self._outcome.errors)
        else:
            result = getattr(self, '_outcomeForDoCleanups',
                             self._resultForDoCleanups)

        success = not list2reason(result.errors) and not list2reason(
            result.failures)

        # check unit.log for alerts

        unit_log = self.testdir + '/unit.log'

        with open(unit_log, 'r', encoding='utf-8', errors='ignore') as f:
            self._check_alerts(f.read())

        # remove unit.log

        if not TestUnit.save_log and success:
            shutil.rmtree(self.testdir)

        else:
            self._print_log()

    def stop(self):
        self._stop()
        self.stop_processes()
        atexit.unregister(self.stop)

    def _stop(self):
        if self._p.poll() is not None:
            return

        with self._p as p:
            p.send_signal(signal.SIGQUIT)

            try:
                retcode = p.wait(15)
                if retcode:
                    self.fail("Child process terminated with code " +
                              str(retcode))
            except:
                p.kill()
                self.fail("Could not terminate unit")

    def run_process(self, target, *args):
        if not hasattr(self, '_processes'):
            self._processes = []

        process = Process(target=target, args=args)
        process.start()

        self._processes.append(process)

    def stop_processes(self):
        if not hasattr(self, '_processes'):
            return

        for process in self._processes:
            if process.is_alive():
                process.terminate()
                process.join(timeout=15)

                if process.is_alive():
                    self.fail('Fail to stop process')

    def waitforfiles(self, *files):
        for i in range(50):
            wait = False
            ret = False

            for f in files:
                if not os.path.exists(f):
                    wait = True
                    break

            if wait:
                time.sleep(0.1)

            else:
                ret = True
                break

        return ret

    def public_dir(self, path):
        os.chmod(path, 0o777)

        for root, dirs, files in os.walk(path):
            for d in dirs:
                os.chmod(os.path.join(root, d), 0o777)
            for f in files:
                os.chmod(os.path.join(root, f), 0o777)

    def _check_alerts(self, log):
        """Fail the test if *log* contains [alert] lines or sanitizer errors.

        All alerts found are printed first; alerts matching a pattern in
        self.skip_alerts are then filtered out before the assertion.
        Sanitizer errors are checked unless self.skip_sanitizer is set.
        """
        found = False

        # Raw strings for the regexes: '\[' in a plain string literal is an
        # invalid escape sequence (SyntaxWarning on modern Python).
        alerts = re.findall(r'.+\[alert\].+', log)

        if alerts:
            print('All alerts/sanitizer errors found in log:')
            # Plain loop instead of a side-effect list comprehension.
            for alert in alerts:
                print(alert)
            found = True

        if self.skip_alerts:
            for skip in self.skip_alerts:
                alerts = [al for al in alerts if re.search(skip, al) is None]

        if alerts:
            self._print_log(log)
            self.assertFalse(alerts, 'alert(s)')

        if not self.skip_sanitizer:
            sanitizer_errors = re.findall(r'.+Sanitizer.+', log)

            if sanitizer_errors:
                self._print_log(log)
                self.assertFalse(sanitizer_errors, 'sanitizer error(s)')

        if found:
            print('skipped.')

    @staticmethod
    def _parse_args():
        """Parse the harness's command line flags.

        Returns the (namespace, remaining_args) pair produced by
        argparse.ArgumentParser.parse_known_args(); unknown arguments are
        left for the underlying test runner.
        """
        parser = argparse.ArgumentParser(add_help=False)

        # (short, long, dest, help) for each boolean flag.
        flags = (
            ('-d', '--detailed', 'detailed',
             'Detailed output for tests'),
            ('-l', '--log', 'save_log',
             'Save unit.log after the test execution'),
            ('-r', '--reprint_log', 'print_log',
             'Print unit.log to stdout in case of errors'),
            ('-u', '--unsafe', 'unsafe',
             'Run unsafe tests'),
        )

        for short_opt, long_opt, dest, help_text in flags:
            parser.add_argument(
                short_opt,
                long_opt,
                dest=dest,
                action='store_true',
                help=help_text,
            )

        return parser.parse_known_args()

    @staticmethod
    def _set_args(args):
        """Copy parsed command line flags onto TestUnit class attributes."""
        for option in ('detailed', 'save_log', 'print_log', 'unsafe'):
            setattr(TestUnit, option, getattr(args, option))

        # set stdout to non-blocking

        if TestUnit.detailed or TestUnit.print_log:
            fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, 0)

    def _print_log(self, data=None):
        """Report where unit.log lives and optionally dump its content.

        The content (*data* when given, otherwise the file itself) is
        printed only when the print_log option was enabled.
        """
        path = self.testdir + '/unit.log'

        print('Path to unit.log:\n' + path + '\n')

        if not TestUnit.print_log:
            return

        if data is None:
            with open(path, 'r', encoding='utf-8', errors='ignore') as f:
                data = f.read()

        print(data)
Пример #48
0
    def test_connect(self):
        """
        Test connecting to a pool.

        :avocado: tags=pool,poolconnect,quick
        """
        global basepath

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        setidlist = self.params.get("setname", '/run/tests/setnames/*')
        setid = setidlist[0]
        expected_for_param.append(setidlist[1])

        # if any parameter results in failure then the test should FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            uid = os.geteuid()
            gid = os.getegid()

            # TODO make these params in the yaml
            daosctl = basepath + '/install/bin/daosctl'

            hostfile = basepath + self.params.get("hostfile", '/run/files/')
            host1 = GetHostsFromFile.getHostsFromFile(hostfile)[0]
            host2 = GetHostsFromFile.getHostsFromFile(hostfile)[1]

            # 0o731 was written as the Python 2 octal literal 0731, which is
            # a syntax error under Python 3.
            create_cmd = ('{0} create-pool -m {1} -u {2} -g {3} '
                          '-s {4} -c 1'.format(
                              daosctl, 0o731, uid, gid, setid))
            uuid_str = """{0}""".format(process.system_output(create_cmd))
            print("uuid is {0}\n".format(uuid_str))

            exists = CheckForPool.checkForPool(host1, uuid_str)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".
                          format(uuid_str, host1))
            exists = CheckForPool.checkForPool(host2, uuid_str)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".
                          format(uuid_str, host2))

            connect_cmd = ('{0} connect-pool -i {1} '
                           '-s {2} -r -l 0,1'.format(daosctl,
                                                     uuid_str, setid))
            process.system(connect_cmd)

            # NOTE(review): delete_cmd is built but never executed, so the
            # pool created above is never destroyed -- confirm intent.
            delete_cmd = ('{0} destroy-pool -i {1} -s {2} -f'.format(
                daosctl, uuid_str, setid))

            if expected_result == 'FAIL':
                self.fail("Expected to fail but passed.\n")

        except Exception as e:
            # print() calls: the original Python 2 print statements do not
            # parse under Python 3.
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Expecting to pass but test has failed.\n")
Пример #49
0
    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=all,pool,pr,medium,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        # createsvc is a pair: [0] = requested service replica count,
        # [1] = expected test outcome ('PASS'/'FAIL') from the yaml
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # checking returned rank list for server more than 1
            # NOTE(review): 999999 appears to be an "unset" sentinel value
            # in rl_ranks -- confirm against the DaosPool binding.
            iterator = 0
            while (
                    int(self.pool.svc.rl_ranks[iterator]) > 0 and
                    int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and
                    int(self.pool.svc.rl_ranks[iterator]) != 999999
            ):
                iterator += 1
            if iterator != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            # every returned rank must be unique
            rank_list = []
            for iterator in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[iterator]))
                if len(rank_list) != len(set(rank_list)):
                    self.fail("Duplicate values in returned rank list")

            self.pool.pool_query()
            leader = self.pool.pool_info.pi_leader
            if createsvc[0] == 3:
                # kill pool leader and exclude it
                self.pool.pool_svc_stop()
                self.pool.exclude([leader])
                # perform pool disconnect, try connect again and disconnect
                self.pool.disconnect()
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                # kill another server which is not a leader and exclude it
                server = DaosServer(self.context, self.server_group, 3)
                server.kill(1)
                self.pool.exclude([3])
                # perform pool connect
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Пример #50
0
def unzip(zip_file,
          dest,
          excludes=None,
          template=None,
          runas=None,
          trim_output=False):
    '''
    Uses the ``zipfile`` Python module to unpack zip files

    .. versionchanged:: 2015.5.0
        This function was rewritten to use Python's native zip file support.
        The old functionality has been preserved in the new function
        :mod:`archive.cmd_unzip <salt.modules.archive.cmd_unzip>`. For versions
        2014.7.x and earlier, see the :mod:`archive.cmd_zip
        <salt.modules.archive.cmd_zip>` documentation.

    zip_file
        Path of zip file to be unpacked

    dest
        The destination directory into which the file should be unpacked

    excludes : None
        Comma-separated list of files not to unpack. Can also be passed in a
        Python list.

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2

    runas : None
        Unpack the zip file as the specified user. Defaults to the user under
        which the minion is running.

    trim_output : False
        The number of files we should output on success before the rest are trimmed, if this is
        set to True then it will default to 100

    CLI Example:

    .. code-block:: bash

        salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
    '''
    if not excludes:
        excludes = []
    if runas:
        euid = os.geteuid()
        egid = os.getegid()
        uinfo = __salt__['user.info'](runas)
        if not uinfo:
            raise SaltInvocationError(
                'User \'{0}\' does not exist'.format(runas))

    zip_file, dest = _render_filenames(zip_file, dest, None, template)

    if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
        # Change the egid first, as changing it after the euid will fail
        # if the runas user is non-privileged.
        os.setegid(uinfo['gid'])
        os.seteuid(uinfo['uid'])

    try:
        exc = None
        # Define cleaned_files here so that an exception will not prevent this
        # variable from being defined and cause a NameError in the return
        # statement at the end of the function.
        cleaned_files = []
        with contextlib.closing(zipfile.ZipFile(zip_file, "r")) as zfile:
            files = zfile.namelist()

            if isinstance(excludes, string_types):
                excludes = [x.strip() for x in excludes.split(',')]
            elif isinstance(excludes, (float, integer_types)):
                excludes = [str(excludes)]

            cleaned_files.extend([x for x in files if x not in excludes])
            for target in cleaned_files:
                if target not in excludes:
                    if salt.utils.is_windows() is False:
                        info = zfile.getinfo(target)
                        # Check if zipped file is a symbolic link
                        if info.external_attr == 2716663808:
                            source = zfile.read(target)
                            os.symlink(source, os.path.join(dest, target))
                            continue
                    zfile.extract(target, dest)
    except Exception as err:  # pylint: disable=broad-except
        # Capture into ``exc``: Python 3 deletes the ``except ... as`` target
        # name when the handler exits, so ``except Exception as exc: pass``
        # would leave ``exc`` undefined in the ``finally`` block below and
        # raise NameError instead of the intended CommandExecutionError.
        exc = err
    finally:
        # Restore the euid/egid
        if runas:
            os.seteuid(euid)
            os.setegid(egid)
        if exc is not None:
            # Wait to raise the exception until euid/egid are restored to avoid
            # permission errors in writing to minion log.
            raise CommandExecutionError(
                'Exception encountered unpacking zipfile: {0}'.format(exc))

    return _trim_files(cleaned_files, trim_output)
Пример #51
0
import grp
import re
from datetime import date, datetime
from threading import Thread
from tempfile import TemporaryDirectory
from test import generic

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import config
import snapshots
import tools

# Effective user and group of the process running the tests, resolved to
# their symbolic names via pwd/grp.
CURRENTUID = os.geteuid()
CURRENTUSER = pwd.getpwuid(CURRENTUID).pw_name

CURRENTGID = os.getegid()
CURRENTGROUP = grp.getgrgid(CURRENTGID).gr_name

#all groups the current user is member in
GROUPS = [i.gr_name for i in grp.getgrall() if CURRENTUSER in i.gr_mem]
# True when the user has no supplementary group memberships
NO_GROUPS = not GROUPS

# True when the tests run with root privileges (effective uid 0)
IS_ROOT = os.geteuid() == 0

class TestSnapshots(generic.SnapshotsTestCase):
    """Snapshot test cases; starts every test from a clean log state."""

    def setUp(self):
        """Remove stale take-snapshot log/message files before each test."""
        super(TestSnapshots, self).setUp()

        leftovers = (self.cfg.takeSnapshotLogFile(),
                     self.cfg.takeSnapshotMessageFile())
        for leftover in leftovers:
            if os.path.exists(leftover):
                os.remove(leftover)
Пример #52
0
class GridFUSE(Operations):
    """FUSE operations backed by MongoDB GridFS.

    Every filesystem entry (file, directory, symlink) is a GridFS file
    whose metadata document carries: ``stat`` (a dict shaped like ``ST``
    below), ``dirname`` (parent path, used by readdir queries) and
    ``visible`` (soft-delete flag; unlink only flips it to False).
    """

    # Default connection node: db "gridfs", root collection "fs".
    DEFAULT = ('mongodb://127.0.0.1/gridfs/fs', )
    # NOTE(review): FMODE evaluates to 0o344 (owner write+exec, group/other
    # read) -- the XOR deliberately drops the owner's read bit; confirm.
    FMODE = (stat.S_IRWXU | stat.S_IROTH | stat.S_IRGRP) ^ stat.S_IRUSR
    # Directory default: FMODE plus execute (search) bits for everyone.
    DMODE = FMODE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    # Template stat dict stored on every new entry; the time fields are
    # filled in by _ent() at creation time.
    ST = ({
        'st_mode': stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
        'st_ino': 0,
        'st_dev': 0,
        'st_nlink': 1,
        'st_uid': os.geteuid(),
        'st_gid': os.getegid(),
        'st_size': 0,
        'st_atime': 0,
        'st_mtime': 0,
        'st_ctime': 0,
    })

    def __repr__(self):
        """Debug representation showing the backing collection."""
        return '<%s.%s: %s>' % (__name__, self.__class__.__name__, ' '.join(
            [('%s=%r' % x) for x in [
                ('fs', self.fs),
            ]]))

    def __init__(self, nodes=None, db=None, coll=None, *args, **kwds):
        """Connect to the MongoDB cluster and ensure the root entry exists.

        ``nodes`` entries may be bare host strings or mongodb:// URIs; the
        first URI found supplies the default ``db`` (first path segment)
        and ``coll`` (remaining path, '/' replaced by '.') when those are
        not given explicitly.  Raises TypeError on a non-mongodb scheme or
        when db/coll cannot be determined.
        """
        super(GridFUSE, self).__init__()
        nodes = nodes or GridFUSE.DEFAULT
        if isinstance(nodes, basestring):
            nodes = [nodes]
        cluster = list()
        for node in nodes:
            uri = urlsplit(node)
            if not uri.scheme:
                # bare host[:port] entry -- pass through unchanged
                cluster.append(node)
                continue
            if uri.scheme != 'mongodb':
                raise TypeError('invalid uri.scheme: %r' % uri.scheme)
            node_db, _, node_coll = uri.path.strip('/').partition('/')
            if db is None and node_db:
                db = node_db
            if coll is None and node_coll:
                coll = node_coll.replace('/', '.')
            if node_db and uri.username is None:
                # NOTE(review): db segment is dropped from the URI when no
                # credentials are present -- presumably only needed for
                # authentication; confirm.
                node_db = str()
            cluster.append(
                urlunsplit((
                    uri.scheme,
                    uri.netloc,
                    node_db,
                    uri.query,
                    uri.fragment,
                )))
        if not db or not coll:
            raise TypeError('undefined db and/or root collection')
        conn = self.conn = MongoClient(cluster)
        self.debug = bool(kwds.pop('debug', False))
        self.gfs = GridFS(conn[db], collection=coll)
        self.fs = conn[db][coll]
        self._ctx = Context(self)
        if not self.gfs.exists(filename=''):
            # filename '' is the root directory entry
            self.mkdir()

    def __call__(self, op, path, *args):
        """Dispatch a FUSE request to the method named *op*.

        Paths are normalized by stripping leading/trailing slashes before
        dispatch; unknown operations fail with EFAULT.
        """
        if not hasattr(self, op):
            raise FuseOSError(EFAULT)
        ret = getattr(self, op)(path.strip('/'), *args)
        if self.debug:
            self._debug(op, path, args, ret)
        return ret

    def _debug(self, op, path, args, ret):
        """Write an operation trace to stderr (bulk read/write payloads
        are omitted)."""
        own = op in self.__class__.__dict__
        sys.stderr.write('%s:%s:%i/%i/%i\n' %
                         ((op.upper(), own) + fuse_get_context()))
        sys.stderr.write(':: %s\n' % path)
        if op not in ('read', 'write'):
            sys.stderr.write(':: %s\n' % pf(args))
            sys.stderr.write(':: %s\n' % pf(ret))
        sys.stderr.write('\n')
        sys.stderr.flush()

    def getattr(self, path, fh):
        """Return the stat dict for *path* (or the open handle *fh*)."""
        spec = None
        if fh is not None:
            fh, spec = self._ctx.get(fh)
        elif self.gfs.exists(filename=path, visible=True):
            spec = self.gfs.get_last_version(path)

        if spec is None:
            raise FuseOSError(ENOENT)

        # st_size comes from the GridFS content length, not the stored stat
        st = spec.stat.copy()
        st['st_size'] = spec.length
        return st

    def rename(self, path, new):
        """Rename *path* to *new* by rewriting filename/dirname in place."""
        new = new.strip('/')
        dirname = basename = None
        if new:
            dirname, basename = pth.split(new)
        self.fs.files.update(
            {
                'filename': path,
                'visible': True
            },
            {'$set': {
                'filename': new,
                'dirname': dirname
            }},
            upsert=False,
            multi=False,
        )

    def chmod(self, path, mode):
        """Store a new st_mode on the entry's stat document."""
        self.fs.files.update(
            {
                'filename': path,
                'visible': True
            },
            {'$set': {
                'stat.st_mode': mode
            }},
            upsert=False,
            multi=False,
        )

    def chown(self, path, uid, gid):
        """Store new st_uid/st_gid on the entry's stat document."""
        self.fs.files.update(
            {
                'filename': path,
                'visible': True
            },
            {'$set': {
                'stat.st_uid': uid,
                'stat.st_gid': gid
            }},
            upsert=False,
            multi=False,
        )

    def _ent(self, path):
        """Create and return a new open GridFS entry for *path*.

        Raises EEXIST if a visible entry already exists; stamps
        ctime/mtime/atime with the current time.
        """
        if self.gfs.exists(filename=path, visible=True):
            raise FuseOSError(EEXIST)
        dirname = basename = None
        if path:
            dirname, basename = pth.split(path)
        now = time.time()
        st = self.ST.copy()
        st.update(st_ctime=now, st_mtime=now, st_atime=now)
        return self.gfs.new_file(
            filename=path,
            stat=st,
            dirname=dirname,
            visible=True,
        )

    def create(self, path, mode=FMODE, fi=None):
        """Create a regular file; return a handle (or 0 when *fi* given)."""
        with self._ent(path) as spec:
            spec._file['stat'].update(st_mode=mode | S_IFREG)
        file = spec._file
        file.pop('_id')
        # re-open as a fresh GridIn so subsequent write()s go to it
        fh, spec = self._ctx.acquire(GridIn(self.fs, **file))
        if fi is not None:
            fi.fh = fh
            return 0
        return fh

    def mkdir(self, path='', mode=DMODE):
        """Create a directory entry (defaults to the root '')."""
        with self._ent(path) as spec:
            spec._file['stat'].update(st_mode=mode | S_IFDIR)
        return 0

    #TODO: impl?
    def link(self, path, source):
        """Hard links are not supported."""
        raise FuseOSError(ENOTSUP)

    def symlink(self, path, source):
        """Create a symlink entry whose content is the target path."""
        with self._ent(path) as spec:
            spec._file['stat'].update(st_mode=0o0777 | S_IFLNK)
            spec.write(str(source))
        return 0

    def readlink(self, path):
        """Return the target path stored in a symlink entry."""
        spec = None
        if self.gfs.exists(filename=path, visible=True):
            spec = self.gfs.get_last_version(path)

        if spec is None:
            raise FuseOSError(ENOENT)
        elif not spec.stat['st_mode'] & S_IFLNK > 0:
            raise FuseOSError(EINVAL)

        return spec.read()

    def readdir(self, path, fh):
        """Yield '.', '..' and the basenames of visible children."""
        spec = None
        if fh is not None:
            fh, spec = self._ctx.get(fh)
        elif self.gfs.exists(filename=path, visible=True):
            spec = self.gfs.get_last_version(path)

        if spec is None:
            raise FuseOSError(ENOENT)
        elif not spec.stat['st_mode'] & S_IFDIR > 0:
            raise FuseOSError(ENOTDIR)

        for rel in ('.', '..'):
            yield rel

        # children are located via the denormalized 'dirname' field
        for sub in self.fs.files.find({
                'dirname': path,
                'visible': True,
        }).distinct('filename'):
            yield pth.basename(sub)

    def open(self, path, flags=None):
        """Acquire a handle for *path* (flags are currently ignored)."""
        #TODO: handle os.O_* flags?
        fh, spec = self._ctx.get(path)
        if hasattr(flags, 'fh'):
            flags.fh = fh
            return 0
        return fh

    # directories share the same handle logic
    opendir = open

    def release(self, path, fh):
        """Release a handle obtained from open()/create()."""
        return self._ctx.release(fh)

    releasedir = release

    def read(self, path, size, offset, fh):
        """Read *size* bytes at *offset* from the latest file version."""
        spec = self.gfs.get_last_version(path)
        spec.seek(offset, os.SEEK_SET)
        return spec.read(size)

    def write(self, path, data, offset, fh):
        """Write *data* to the entry behind *fh* (or *path*).

        NOTE(review): *offset* is ignored -- data always goes through the
        append-only GridIn stream; confirm random writes are unsupported.
        """
        if fh is not None:
            fh = getattr(fh, 'fh', fh)
            fh, spec = self._ctx.get(fh)
        elif self.gfs.exists(filename=path, visible=True):
            fh, spec = self._ctx.acquire(path)

        if not hasattr(spec, 'write'):
            # handle points at a read-side object: truncate swaps it for a
            # writable one, then re-fetch the spec from the handle table
            self.truncate(path, 0, fh=fh)
            spec = self._ctx._fd[fh]
        spec.write(data)

        return len(data)

    def unlink(self, path):
        """Soft-delete *path* by clearing its 'visible' flag."""
        if not path:
            #...cannot remove mountpoint
            raise FuseOSError(EBUSY)

        spec = self.gfs.get_last_version(path)
        if spec is None or not spec.visible:
            raise FuseOSError(ENOENT)

        self.fs.files.update(
            {
                'filename': path,
                'visible': True
            },
            {'$set': {
                'visible': False
            }},
            upsert=False,
            multi=True,
        )

        return 0

    # directories are soft-deleted the same way
    rmdir = unlink

    def truncate(self, path, length, fh=None):
        """Truncate an entry to zero length (only length == 0 supported)."""
        if length != 0:
            raise FuseOSError(ENOTSUP)

        spec = None
        if fh is not None:
            fh = getattr(fh, 'fh', fh)
            fh, spec = self._ctx.get(fh)
        elif self.gfs.exists(filename=path, visible=True):
            spec = self.gfs.get_last_version(path)
        if spec is None:
            raise FuseOSError(EBADF)

        if hasattr(spec, 'write') and spec._chunk_number == 0:
            # still buffering the first chunk: reset the in-memory buffer
            spec._buffer.truncate(0)
            spec._buffer.seek(0)
            spec._position = 0
        else:
            #FIXME: this is terrible... whole class needs refactor
            # write an empty replacement version, then hide the old one
            fi = spec._file
            fi.pop('_id')
            with self.gfs.new_file(**fi) as zero:
                self.unlink(path)
            if fh:
                self._ctx.release(fh)
                self._ctx._fd[fh] = self.gfs.new_file(**fi)

        return 0
Пример #53
0
   return(0)

if __name__ == "__main__":
    # Daemonize first; everything below runs inside the daemon process.
    retCode = createDaemon()

    # The code, as is, will create a new file in the root directory, when
    # executed with superuser privileges.  The file will contain the following
    # daemon related process parameters: return code, process ID, parent
    # process group ID, session ID, user ID, effective user ID, real group ID,
    # and the effective group ID.  Notice the relationship between the daemon's
    # process ID, process group ID, and its parent's process ID.

    procParams = """
   return code = %s
   process ID = %s
   parent process ID = %s
   process group ID = %s
   session ID = %s
   user ID = %s
   effective user ID = %s
   real group ID = %s
   effective group ID = %s
   """ % (
        retCode,
        os.getpid(),
        os.getppid(),
        os.getpgrp(),
        os.getsid(0),
        os.getuid(),
        os.geteuid(),
        os.getgid(),
        os.getegid(),
    )

    with open("createDaemon.log", "w") as log:
        log.write(procParams + "\n")

    sys.exit(retCode)
Пример #54
0
def zip_(zip_file, sources, template=None, cwd=None, runas=None):
    '''
    Uses the ``zipfile`` Python module to create zip files

    .. versionchanged:: 2015.5.0
        This function was rewritten to use Python's native zip file support.
        The old functionality has been preserved in the new function
        :mod:`archive.cmd_zip <salt.modules.archive.cmd_zip>`. For versions
        2014.7.x and earlier, see the :mod:`archive.cmd_zip
        <salt.modules.archive.cmd_zip>` documentation.

    zip_file
        Path of zip file to be created

    sources
        Comma-separated list of sources to include in the zip file. Sources can
        also be passed in a Python list.

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.zip template=jinja /tmp/zipfile.zip /tmp/sourcefile1,/tmp/{{grains.id}}.txt

    cwd : None
        Use this argument along with relative paths in ``sources`` to create
        zip files which do not contain the leading directories. If not
        specified, the zip file will be created as if the cwd was ``/``, and
        creating a zip file of ``/foo/bar/baz.txt`` will contain the parent
        directories ``foo`` and ``bar``. To create a zip file containing just
        ``baz.txt``, the following command would be used:

        .. code-block:: bash

            salt '*' archive.zip /tmp/baz.zip baz.txt cwd=/foo/bar

    runas : None
        Create the zip file as the specified user. Defaults to the user under
        which the minion is running.


    CLI Example:

    .. code-block:: bash

        salt '*' archive.zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2
    '''
    if runas:
        euid = os.geteuid()
        egid = os.getegid()
        uinfo = __salt__['user.info'](runas)
        if not uinfo:
            raise SaltInvocationError(
                'User \'{0}\' does not exist'.format(runas))

    zip_file, sources = _render_filenames(zip_file, sources, None, template)

    if isinstance(sources, string_types):
        sources = [x.strip() for x in sources.split(',')]
    elif isinstance(sources, (float, integer_types)):
        sources = [str(sources)]

    if not cwd:
        for src in sources:
            if not os.path.isabs(src):
                raise SaltInvocationError(
                    'Relative paths require the \'cwd\' parameter')
    else:

        def _bad_cwd():
            raise SaltInvocationError('cwd must be absolute')

        try:
            if not os.path.isabs(cwd):
                _bad_cwd()
        except AttributeError:
            # cwd is not a string (e.g. an int) -- same error as relative
            _bad_cwd()

    if runas and (euid != uinfo['uid'] or egid != uinfo['gid']):
        # Change the egid first, as changing it after the euid will fail
        # if the runas user is non-privileged.
        os.setegid(uinfo['gid'])
        os.seteuid(uinfo['uid'])

    try:
        exc = None
        archived_files = []
        with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zfile:
            for src in sources:
                if cwd:
                    src = os.path.join(cwd, src)
                if os.path.exists(src):
                    if os.path.isabs(src):
                        rel_root = '/'
                    else:
                        rel_root = cwd if cwd is not None else '/'
                    if os.path.isdir(src):
                        for dir_name, sub_dirs, files in os.walk(src):
                            if cwd and dir_name.startswith(cwd):
                                arc_dir = salt.utils.relpath(dir_name, cwd)
                            else:
                                arc_dir = salt.utils.relpath(
                                    dir_name, rel_root)
                            if arc_dir:
                                archived_files.append(arc_dir + '/')
                                zfile.write(dir_name, arc_dir)
                            for filename in files:
                                abs_name = os.path.join(dir_name, filename)
                                arc_name = os.path.join(arc_dir, filename)
                                archived_files.append(arc_name)
                                zfile.write(abs_name, arc_name)
                    else:
                        if cwd and src.startswith(cwd):
                            arc_name = salt.utils.relpath(src, cwd)
                        else:
                            arc_name = salt.utils.relpath(src, rel_root)
                        archived_files.append(arc_name)
                        zfile.write(src, arc_name)
    except Exception as err:  # pylint: disable=broad-except
        # Capture into ``exc``: Python 3 deletes the ``except ... as`` target
        # name when the handler exits, so ``except Exception as exc: pass``
        # would leave ``exc`` undefined in the ``finally`` block below and
        # raise NameError instead of the intended CommandExecutionError.
        exc = err
    finally:
        # Restore the euid/egid
        if runas:
            os.seteuid(euid)
            os.setegid(egid)
        if exc is not None:
            # Wait to raise the exception until euid/egid are restored to avoid
            # permission errors in writing to minion log.
            raise CommandExecutionError(
                'Exception encountered creating zipfile: {0}'.format(exc))

    return archived_files
Пример #55
0
def call(name, func, args=(), kws=None,
         onlyif=None,
         unless=None,
         **kwargs):
    '''
    Invoke a pre-defined Python function with arguments specified in the state
    declaration. This function is mainly used by the :mod:`salt.renderers.pydsl`
    renderer.

    The intepretation of `onlyif` and `unless` arguments are identical to those
    of :func:`salt.states.cmd.run`, and all other arguments(`cwd`, `runas`, ...)
    allowed by `cmd.run` are allowed here, except that their effects apply only
    to the commands specified in `onlyif` and `unless` rather than to the function
    to be invoked.

    In addition the `stateful` argument has no effects here.

    The return value of the invoked function will be interpreted as follows.

    If it's a dictionary then it will be passed through to the state system, which
    expects it to have the usual structure returned by any salt state function.

    Otherwise, the return value(denoted as ``result`` in the code below) is
    expected to be a JSON serializable object, and this dictionary is returned:

    .. code-block:: python

        { 'changes': { 'retval': result },
          'result': True if result is None else bool(result),
          'comment': result if isinstance(result, string_types) else ''
        }
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    cmd_kwargs = {'cwd': kwargs.get('cwd'),
                  'runas': kwargs.get('user'),
                  'shell': kwargs.get('shell') or __grains__['shell'],
                  'env': kwargs.get('env')}
    # NOTE(review): presumably _run_check can switch the effective gid (for
    # runas support); remember the current one so it is restored either way.
    pgid = os.getegid()
    try:
        cret = _run_check(cmd_kwargs, onlyif, unless, None, None, None, None)
        if isinstance(cret, dict):
            # onlyif/unless check refused execution -- propagate its result
            ret.update(cret)
            return ret
    finally:
        os.setegid(pgid)
    if not kws:
        kws = {}
    result = func(*args, **kws)
    if isinstance(result, dict):
        ret.update(result)
        return ret
    else:
        # result must be json serializable else we get an error
        ret['changes'] = {'retval': result}
        ret['result'] = True if result is None else bool(result)
        # six's string_types instead of Python 2-only ``basestring``,
        # consistent with the other functions in this file.
        if isinstance(result, string_types):
            ret['comment'] = result
        return ret
Пример #56
0
    def create_connection(
            self, ccache=None, bind_dn=None, bind_pw='', cacert=None,
            autobind=AUTOBIND_AUTO, serverctrls=None, clientctrls=None,
            time_limit=_missing, size_limit=_missing):
        """
        Connect to LDAP server.

        Keyword arguments:
        ldapuri -- the LDAP server to connect to
        ccache -- Kerberos ccache name
        bind_dn -- dn used to bind to the server
        bind_pw -- password used to bind to the server
        debug_level -- LDAP debug level option
        cacert -- TLS CA certificate filename
        autobind - autobind as the current user
        time_limit, size_limit -- maximum time and size limit for LDAP
            possible options:
                - value - sets the given value
                - None - reads value from ipaconfig
                - _missing - keeps previously configured settings
                             (unlimited set by default in constructor)

        Extends backend.Connectible.create_connection.
        """
        # Default to binding as the directory manager.
        if bind_dn is None:
            bind_dn = DN(('cn', 'directory manager'))
        assert isinstance(bind_dn, DN)

        if cacert is None:
            cacert = paths.IPA_CA_CRT

        # object.__setattr__ is used because this class makes its public
        # attributes read-only through the normal setattr path.
        if time_limit is not _missing:
            object.__setattr__(self, 'time_limit', time_limit)
        if size_limit is not _missing:
            object.__setattr__(self, 'size_limit', size_limit)

        client = LDAPCache(
            self.ldap_uri,
            force_schema_updates=self._force_schema_updates,
            enable_cache=self._enable_cache,
            cacert=cacert)
        conn = client._conn

        with client.error_handler():
            minssf = conn.get_option(_ldap.OPT_X_SASL_SSF_MIN)
            maxssf = conn.get_option(_ldap.OPT_X_SASL_SSF_MAX)
            # Always connect with at least an SSF of 56, confidentiality
            # This also protects us from a broken ldap.conf
            if minssf < 56:
                minssf = 56
                conn.set_option(_ldap.OPT_X_SASL_SSF_MIN, minssf)
                # the max must never be below the min
                if maxssf < minssf:
                    conn.set_option(_ldap.OPT_X_SASL_SSF_MAX, minssf)

        ldapi = self.ldap_uri.startswith('ldapi://')

        # Bind strategy, in order of preference:
        # 1. simple bind when a password was supplied
        # 2. EXTERNAL (autobind) over the ldapi socket when running as root
        # 3. GSSAPI (Kerberos) bind otherwise
        if bind_pw:
            client.simple_bind(bind_dn, bind_pw,
                               server_controls=serverctrls,
                               client_controls=clientctrls)
        elif autobind != AUTOBIND_DISABLED and os.getegid() == 0 and ldapi:
            try:
                client.external_bind(server_controls=serverctrls,
                                     client_controls=clientctrls)
            except errors.NotFound:
                if autobind == AUTOBIND_ENABLED:
                    # autobind was required and failed, raise
                    # exception that it failed
                    raise
        else:
            if ldapi:
                with client.error_handler():
                    conn.set_option(_ldap.OPT_HOST_NAME, self.api.env.host)
            # Select the Kerberos credential cache for the GSSAPI bind.
            if ccache is None:
                os.environ.pop('KRB5CCNAME', None)
            else:
                os.environ['KRB5CCNAME'] = ccache

            principal = krb_utils.get_principal(ccache_name=ccache)

            client.gssapi_bind(server_controls=serverctrls,
                               client_controls=clientctrls)
            # remember who we bound as for the rest of the request
            setattr(context, 'principal', principal)

        return conn
Пример #57
0
def script(name,
        source=None,
        template=None,
        onlyif=None,
        unless=None,
        cwd=None,
        user=None,
        group=None,
        shell=None,
        env=None,
        stateful=False,
        **kwargs):
    '''
    Fetch a script from a remote source and execute it on the minion.

    name
        The command to execute; it runs with the path and permissions of
        the salt-minion. Also used as ``source`` when none is given.

    source
        The script to download to the minion, hosted on the salt master.
        A file called eggs in master directory spam is ``salt://spam/eggs``.

    template
        Templating engine used to render the downloaded file; jinja, mako
        and wempy are currently supported.

    onlyif
        Check command: the named command runs only if this returns true.

    unless
        Check command: the named command runs only if this returns false.

    cwd
        Working directory for the command, defaults to /root.

    user
        The user name to run the command as.

    group
        The group context to run the command as.

    shell
        The shell to use for execution, defaults to the shell grain.

    env
        The root directory of the environment for the referencing script.
        The environments are defined in the master config file.

    stateful
        The command being executed is expected to return data about
        executing a state.
    '''
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': False}

    # Bail out early if the requested working directory does not exist.
    if cwd and not os.path.isdir(cwd):
        ret['comment'] = 'Desired working directory is not available'
        return ret

    # Default to the environment passed down by the state compiler.
    if env is None:
        env = kwargs.get('__env__', 'base')

    # The state name doubles as the source when none was supplied.
    if source is None:
        source = name

    # Remember the effective GID so it can be restored afterwards;
    # running the script under another user/group may change it.
    saved_egid = os.getegid()

    cmd_kwargs = copy.deepcopy(kwargs)
    cmd_kwargs.update({'runas': user,
                       'shell': shell or __grains__['shell'],
                       'env': env,
                       'onlyif': onlyif,
                       'unless': unless,
                       'user': user,
                       'group': group,
                       'cwd': cwd,
                       'template': template})

    check_kwargs = {'cwd': cwd,
                    'runas': user,
                    'shell': shell or __grains__['shell']}

    try:
        check = _run_check(check_kwargs, onlyif, unless, cwd, user, group, shell)
        if isinstance(check, dict):
            # onlyif/unless (or user lookup) decided the outcome for us.
            ret.update(check)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Command "{0}" would have been executed'.format(name)
            return _reinterpreted_state(ret) if stateful else ret

        # All checks passed -- actually run the script.
        try:
            result = __salt__['cmd.script'](source, **cmd_kwargs)
        except CommandExecutionError as err:
            ret['comment'] = str(err)
            return ret

        ret['changes'] = result
        if kwargs.get('retcode', False):
            # With retcode=True cmd.script returns the bare return code.
            ret['result'] = not bool(result)
        else:
            ret['result'] = not bool(result['retcode'])
        ret['comment'] = 'Command "{0}" run'.format(name)
        return _reinterpreted_state(ret) if stateful else ret

    finally:
        os.setegid(saved_egid)
Пример #58
0
def run(name,
        onlyif=None,
        unless=None,
        cwd=None,
        user=None,
        group=None,
        shell=None,
        env=(),
        stateful=False,
        **kwargs):
    '''
    Run a command if certain circumstances are met

    name
        The command to execute, remember that the command will execute with the
        path and permissions of the salt-minion.

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false

    cwd
        The current working directory to execute the command in, defaults to
        /root

    user
        The user name to run the command as

    group
        The group context to run the command as

    shell
        The shell to use for execution, defaults to the shell grain

    env
        Whitespace-separated ``KEY=value`` pairs of environment variables to
        set for the command. Values may themselves contain ``=`` characters.

    stateful
        The command being executed is expected to return data about executing
        a state
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Bail out early if the requested working directory does not exist.
    if cwd and not os.path.isdir(cwd):
        ret['comment'] = 'Desired working directory is not available'
        return ret

    if env:
        _env = {}
        for var in env.split():
            try:
                # Split on the first '=' only so values that themselves
                # contain '=' (e.g. ``LD_PRELOAD=a=b``) parse correctly
                # instead of raising ValueError.
                key, val = var.split('=', 1)
                _env[key] = val
            except ValueError:
                ret['comment'] = 'Invalid environmental var: "{0}"'.format(var)
                return ret
        env = _env

    # Save the effective GID so it can be restored after the command runs;
    # executing as another user/group may alter it.
    pgid = os.getegid()

    cmd_kwargs = {'cwd': cwd,
                  'runas': user,
                  'shell': shell or __grains__['shell'],
                  'env': env}

    try:
        cret = _run_check(cmd_kwargs, onlyif, unless, cwd, user, group, shell)
        if isinstance(cret, dict):
            # onlyif/unless (or user lookup) decided the outcome for us.
            ret.update(cret)
            return ret

        # Wow, we passed the test, run this sucker!
        if not __opts__['test']:
            try:
                cmd_all = __salt__['cmd.run_all'](name, **cmd_kwargs)
            except CommandExecutionError as err:
                ret['comment'] = str(err)
                return ret

            ret['changes'] = cmd_all
            ret['result'] = not bool(cmd_all['retcode'])
            ret['comment'] = 'Command "{0}" run'.format(name)
            return _reinterpreted_state(ret) if stateful else ret
        ret['result'] = None
        ret['comment'] = 'Command "{0}" would have been executed'.format(name)
        return _reinterpreted_state(ret) if stateful else ret

    finally:
        os.setegid(pgid)
Пример #59
0
def have_root():
    """Return ``True`` if the test suite is running as the root user,
    i.e. both the effective UID and effective GID are 0.
    """
    return (geteuid(), getegid()) == (0, 0)
Пример #60
0
from pathlib import Path
import subprocess
import shutil
import os
from git import Repo
import yaml
from intermine_boot import utils

# Upstream repository providing the docker-compose definitions.
DOCKER_COMPOSE_REPO = 'https://github.com/intermine/docker-intermine-gradle'

# Command prefix for docker-compose invocations: run via `env` with the
# caller's effective UID/GID exported so files created inside containers
# end up owned by the invoking user.
ENV_VARS = ['env', 'UID={0}'.format(os.geteuid()), 'GID={0}'.format(os.getegid())]

def _get_compose_path(options, env):
    work_dir = env['data_dir'] / 'docker'
    compose_file = 'dockerhub.docker-compose.yml'
    if options['build_images']:
        compose_file = 'local.docker-compose.yml'
    return work_dir / compose_file

def _create_volume_dirs(compose_path):
    with open(compose_path, 'r') as stream:
        compose_dict = yaml.safe_load(stream)

        for service in compose_dict['services']:
            service_dict = compose_dict['services'][service]

            if 'volumes' not in service_dict:
                continue

            volumes = service_dict['volumes']