Example #1
0
File: fsck.py Project: drewlu/ossql
 def check_lof(self):
     """Ensure that there is a valid /lost+found directory.

     Recreates the directory entry if it is missing, and replaces the
     entry with a fresh directory inode if the existing inode is not a
     directory (the old inode is left unassociated so that a later pass
     can move it into lost+found).
     """

     log.info('Checking lost+found...')

     # Timestamp adjusted by the local UTC offset (time.timezone);
     # presumably the inode table stores local-time stamps -- matches the
     # INSERTs below.
     timestamp = time.time() - time.timezone
     try:
         (inode_l, name_id) = self.conn.get_row("SELECT inode, name_id FROM contents_v "
                                                "WHERE name=? AND parent_inode=?", (b"lost+found", ROOT_INODE))

     except NoSuchRowError:
         # No lost+found entry under the root inode: create a new
         # directory inode (mode 0700, refcount 1) and link it as
         # /lost+found.
         self.found_errors = True
         self.log_error("Recreating missing lost+found directory")
         inode_l = self.conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                                   "VALUES (?,?,?,?,?,?,?)",
                                   (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
                                    os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 2))
         self.conn.execute("INSERT INTO contents (name_id, inode, parent_inode) VALUES(?,?,?)",
                           (self._add_name(b"lost+found"), inode_l, ROOT_INODE))


     # The entry exists (or was just created); make sure it actually is
     # a directory.
     mode = self.conn.get_val('SELECT mode FROM inodes WHERE id=?', (inode_l,))
     if not stat.S_ISDIR(mode):
         self.found_errors = True
         self.log_error('/lost+found is not a directory! Old entry will be saved as '
                        '/lost+found/inode-%s*', inode_l)
         # We leave the old inode unassociated, so that it will be added
         # to lost+found later on.
         inode_l = self.conn.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                                   "VALUES (?,?,?,?,?,?,?)",
                                   (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,
                                    os.getuid(), os.getgid(), timestamp, timestamp, timestamp, 2))
         self.conn.execute('UPDATE contents SET inode=? WHERE name_id=? AND parent_inode=?',
                           (inode_l, name_id, ROOT_INODE))
Example #2
0
	def _packageFolder(tfile, relbasepath, docrootbasepath):
		"""Recursively add the contents of a docroot folder to tar file `tfile`.

		relbasepath is the item's path inside the package archive;
		docrootbasepath is its path inside the docroot.
		NOTE: Python 2 only (old-style octal literals 0755/0644, StringIO).
		"""
		getLogger().debug("Walking folder %s..." % docrootbasepath)
		for entry in FileSystemManager.instance().getdir(docrootbasepath):
			name, apptype = entry['name'], entry['type']
			if apptype == FileSystemManager.APPTYPE_DIR:
				relpath = "%s%s/" % (relbasepath, name) # the name of the current item within the package
				docrootpath = "%s%s/" % (docrootbasepath, name) # the name of the current item within the docroot
				getLogger().debug("Adding directory %s..." % relpath)
				# Directory entry: metadata only, owned by the current user.
				tarinfo = tarfile.TarInfo(relpath)
				tarinfo.type = tarfile.DIRTYPE
				tarinfo.mode = 0755
				tarinfo.uid = os.getuid()
				tarinfo.gid = os.getgid()
				tarinfo.mtime = time.time()
				tfile.addfile(tarinfo)
				# Recurse into the sub-folder.
				_packageFolder(tfile, relbasepath = relpath, docrootbasepath = docrootpath)
			else:
				relname = "%s%s" % (relbasepath, name) # the name of the current item within the package
				docrootname = "%s%s" % (docrootbasepath, name) # the name of the current item within the docroot
				getLogger().debug("Adding file %s..." % relname)
				tarinfo = tarfile.TarInfo(relname)
				tarinfo.type = tarfile.AREGTYPE
				tarinfo.mode = 0644
				tarinfo.uid = os.getuid()
				tarinfo.gid = os.getgid()
				tarinfo.mtime = time.time()
				# Regular file: read the whole content from the docroot and
				# stream it into the archive via an in-memory buffer.
				content = FileSystemManager.instance().read(docrootname)
				tarinfo.size = len(content)
				contentObj = StringIO.StringIO(content)
				tfile.addfile(tarinfo, contentObj)
				contentObj.close()
				getLogger().debug("File %s added to package file (%s bytes)" % (relname, tarinfo.size))
Example #3
0
def access(filename, mode):
    """Emulated os.access() built on the local exists()/stat() helpers.

    Mirrors the original OR-of-clauses check: a permission is considered
    granted if the corresponding other/group/user bit matches the caller's
    real gid/uid, or if the setgid/setuid bit is set and the caller's
    effective gid/uid matches the file's group/owner.
    """
    if mode == os.F_OK:
        return exists(filename)

    st = stat(filename)
    filemode = st.st_mode
    uid = st.st_uid
    gid = st.st_gid

    def _granted(oth_bit, grp_bit, usr_bit):
        # Same clause chain as the original, parameterised on the
        # read/write/execute bit constants; returns the raw truthy value.
        return ((filemode & oth_bit) or
                (filemode & grp_bit and os.getgid() == gid) or
                (filemode & usr_bit and os.getuid() == uid) or
                (filemode & statconsts.S_ISGID and os.getegid() == gid) or
                (filemode & statconsts.S_ISUID and os.geteuid() == uid))

    rOK = _granted(statconsts.S_IROTH, statconsts.S_IRGRP, statconsts.S_IRUSR) if mode & os.R_OK else True
    wOK = _granted(statconsts.S_IWOTH, statconsts.S_IWGRP, statconsts.S_IWUSR) if mode & os.W_OK else True
    xOK = _granted(statconsts.S_IXOTH, statconsts.S_IXGRP, statconsts.S_IXUSR) if mode & os.X_OK else True

    return rOK and wOK and xOK
Example #4
0
def run_tests():
    """Exercise catbox syscall trapping (Python 2 only: print statements).

    Verifies that chown/chmod performed inside the sandbox are recorded
    but not applied, and that getuid/getgid still return correct values.
    """
    # Run `func` inside catbox, allowing writes only under cwd and /var/tmp.
    cr = lambda func: catbox.run(func, [os.getcwd(),'/var/tmp'], logger)

    '''remember the owner'''
    os.chown('/var/tmp/hello.txt', os.getuid(), os.getgid())
    ret = cr(lambda: os.chown('/var/tmp/hello.txt', 0, 0))
    print "(retcode, ownerships) == ", ret.ret, ret.ownerships
    assert '/var/tmp/hello.txt' in ret.ownerships.keys(), "Chown: owner change was not trapped"
    # The chown to root must NOT have taken effect outside the sandbox.
    assert os.stat('/var/tmp/hello.txt').st_uid == os.getuid(), "Chown: owner was changed"

    '''remember the mode'''
    os.chmod('/var/tmp/hello.txt', int("700", 8))
    ret = cr(lambda: os.chmod('/var/tmp/hello.txt', int("755", 8)))
    print "(retcode, modes) == ",ret.ret, ret.modes
    assert '/var/tmp/hello.txt' in ret.modes.keys(), "Chmod: mode change was not trapped"
    # Likewise the chmod must have been trapped, leaving mode 0700.
    assert stat.S_IMODE(os.stat('/var/tmp/hello.txt').st_mode) == int("700", 8), \
            "Chmod: mode was changed"


    '''return the right uid, gid'''
    ret = cr( uid )
    print "(cr(uid), uid) == ", ret.ret, os.getuid()
    assert ret.ret == 0, "getuid/getuid32 not trapped"

    ret = cr( gid )
    print "(cr(gid), gid) == ", ret.ret, os.getgid()
    assert ret.ret == 0, "getgid/getgid32 not trapped"
Example #5
0
def setup_user_mapping(pid, uid=None, gid=None):
    """Write uid_map and gid_map in /proc to create a user mapping
    that maps our user from outside the container to the same user inside the container
    (and no other users are mapped).
    @see: http://man7.org/linux/man-pages/man7/user_namespaces.7.html
    @param pid: The PID of the process in the container.
    @param uid: UID to use inside the container (default: current UID).
    @param gid: GID to use inside the container (default: current GID).
    """
    # BUG FIX: the defaults used to be `uid=os.getuid(), gid=os.getgid()`,
    # which are evaluated once at import time and would be stale if the
    # process changes its UID/GID afterwards.  Resolve them at call time.
    if uid is None:
        uid = os.getuid()
    if gid is None:
        gid = os.getgid()

    proc_child = os.path.join("/proc", str(pid))
    try:
        uid_map = "{0} {1} 1".format(uid, os.getuid()) # map uid internally to our uid externally
        util.write_file(uid_map, proc_child, "uid_map")
    except IOError as e:
        logging.warning("Creating UID mapping into container failed: %s", e)

    try:
        # Per user_namespaces(7), "deny" must be written to setgroups before
        # an unprivileged process may write gid_map.
        util.write_file("deny", proc_child, "setgroups")
    except IOError as e:
        # Not all systems have this file (depends on the kernel version),
        # but if it does not exist, we do not need to write to it.
        if e.errno != errno.ENOENT:
            logging.warning("Could not write to setgroups file in /proc: %s", e)

    try:
        gid_map = "{0} {1} 1".format(gid, os.getgid()) # map gid internally to our gid externally
        util.write_file(gid_map, proc_child, "gid_map")
    except IOError as e:
        logging.warning("Creating GID mapping into container failed: %s", e)
Example #6
0
	def	getattr(self, path, fh=None):
		# FUSE getattr: return a stat dict for `path`, or raise ENOENT.
		# NOTE: Python 2 only (print statement, old-style 0755 octals).
		if self.verbosity>=3: print 'getattr(%s,%s)' % (path,str(fh)),
		path=self.fixpath(path)

		(dirname,filename) = os.path.split(path)
		parents = self.dirparents(path)
		
		# Populate self.cache for the parent directory (two cache slots).
		self.getcache( parents[0], 0 )
		self.getcache( parents[0], 1 )
		if self.cache[0] is None: 
			self.path2caches( parents[0] ) # maybe it just expired?  retry

		if self.cache[0] is None:
			# Parent listing unavailable even after retry.
			raise FuseOSError(ENOENT)
			

		l=self.cache[0];f=[];d=[]
		if filename=='': 
			# Root of the listing: report a directory with a dummy size.
			return dict( st_gid=os.getgid(), st_uid=os.getuid(), st_size=666, st_mode=0755|S_IFDIR, st_atime=time.time(), st_ctime=time.time(), st_mtime=time.time() )
		# Look up the entry by (substring) name, files first.
		# NOTE(review): `filename in o['name']` is a substring match, not
		# equality -- confirm that is intentional.
		for o in l['files']:
			if filename in o['name']:
				return dict( st_gid=os.getgid(), st_uid=os.getuid(), st_size=o['size'], st_mode=0755|S_IFREG, st_atime=o['ctime'], st_ctime=o['ctime'], st_mtime=o['mtime'] )
		for o in l['directories']:
			if filename in o['name']:
				return dict( st_gid=os.getgid(), st_uid=os.getuid(), st_size=o['size'], st_mode=0755|S_IFDIR, st_atime=o['ctime'], st_ctime=o['ctime'], st_mtime=o['mtime'] )
		raise FuseOSError(ENOENT)
Example #7
0
def main():
	# Demonstrates uid/gid and login-name lookups and switching users.
	# NOTE: Python 2 only (print statements); additionally one line below
	# was mangled by scrape redaction ('******') and is not valid syntax.
	print os.getuid()
	print os.getgid()
	print os.environ['USER']
	os.environ['USER']='******'
	print os.environ['USER']


	# print os.environ['USERNAME']
	print os.environ['LOGNAME']
	# print os.getlogin()
	print getpass.getuser(), 'ttttttttttt'
	# os.setuid(0)
	# print os.getuid()

	# Monkey-patch os.getlogin with a pwd-based lookup (works without a tty).
	os.getlogin = lambda: pwd.getpwuid(os.getuid())[0]

	print os.getlogin

	newuid = pwd.getpwnam('frank').pw_uid
	print newuid
	os.setuid(newuid)
	# The next line was garbled by the scraper's credential redaction.
	print('User :'******'test')

	if not os.geteuid() == 0:
		
		sys.exit('Script must be run as root')
Example #8
0
 def getattr(self, path, fh=None):
     """Return a stat-like attribute dict for `path`.

     Paths present in self.cache are reported as regular files sized to
     the cached content.  Other paths are resolved via get_node(): the
     node's entry action (MKDIR vs. anything else) selects directory or
     regular-file mode, and write permission is granted only when
     self.view.get_key(path) yields a key.
     """
     import time  # hoisted here from the cache branch where it was buried

     try:
         content = self.cache[path]
     except KeyError:
         node = self.get_node(path)
         has_perm = bool(self.view.get_key(path))
         if node.entry.action == node.entry.MKDIR:
             mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
         else:
             mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
         return {
             'st_atime': node.entry.timestamp,
             'st_ctime': node.entry.ctime,
             'st_gid': os.getgid(),
             'st_mode': mode,
             'st_mtime': node.entry.timestamp,
             'st_nlink': 1,
             'st_size': len(node.content),
             'st_uid': os.getuid(),
         }
     else:
         # Use a single timestamp so atime/ctime/mtime are consistent
         # (the original called time.time() three separate times).
         now = time.time()
         return {
             'st_atime': now,
             'st_ctime': now,
             'st_gid': os.getgid(),
             'st_mode': stat.S_IFREG | 0o0640,
             'st_mtime': now,
             'st_nlink': 1,
             'st_size': len(content),
             'st_uid': os.getuid(),
         }
Example #9
0
    def callerid(self):
        """Property returning the mCollective PSK caller id.

        The caller type comes from the ``plugin.psk.callertype``
        configuration option (default ``uid``) and selects which identity
        string is produced (e.g. ``uid=1000``).

        :return: the caller id string
        """

        if 'plugin.psk.callertype' in self.config:
            caller_type = self.config['plugin.psk.callertype']
        else:
            caller_type = 'uid'

        if caller_type == 'gid':
            caller_id = 'gid={}'.format(os.getgid())
        elif caller_type == 'group':
            if platform.system() == 'Windows':
                raise exc.PyMcoException("Cannot use the 'group' callertype for the PSK security plugin on the Windows platform")
            # BUG FIX: grp.getgrnam() expects a group *name* string and would
            # raise TypeError for the numeric gid; look the group up by gid
            # and use its name instead.
            caller_id = 'group={}'.format(grp.getgrgid(os.getgid()).gr_name)
        elif caller_type == 'user':
            caller_id = 'user={}'.format(os.getlogin())
        elif caller_type == 'identity':
            caller_id = 'identity={}'.format(self.config.get('identity'))
        else:
            caller_id = 'uid={}'.format(os.getuid())

        self.logger.debug('Setting callerid to %s based on callertype=%s', caller_id, caller_type)
        return caller_id
Example #10
0
def profile_result(func):
    """ decorator for start,end event function profiling with netlogger.
    return value is logged as result.

    NOTE: Python 2 only (func.func_globals / func.func_name).
    """
    # Profiling is opt-in via the NETLOGGER_ON environment variable.
    if os.getenv('NETLOGGER_ON', False) in (
        'off','0','no','false','',False):
        return func
    # Only plain functions can be wrapped.
    if type(func) is not types.FunctionType:
        return func
    
    # NOTE(review): set_meta's 'gpid' key is populated with os.getgid()
    # (the group id), not a process id -- confirm the key name is intended.
    if func.__module__ == '__main__':
        # Top-level script: name the event after the script file itself.
        f = func.func_globals['__file__'] or 'unknown'
        event = '%s' %os.path.splitext(os.path.basename(f))[0]
        log = _logger('script')
        log.set_meta(file=f, pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())
    else:
        # Imported function: name the event after the function itself.
        event = '%s' %func.func_name
        log = _logger('%s' %func.__module__)
        log.set_meta(pid=os.getpid(), ppid=os.getppid(), gpid=os.getgid())

    def nl_profile_func(*args, **kw):
        # Emit start/end events around the call; failures log the end
        # event at ERROR level and re-raise.
        log.debug("%s.start" %event)
        try:
            v = func(*args, **kw)
        except:
            log.error("%s.end" %event)
            raise
        log.debug("%s.end" %event, result=v)
        return v

    return nl_profile_func
Example #11
0
def _chugid(runas):
    # Change this process's gid to the primary group of `runas`.
    # NOTE: Python 2 only (`except OSError, err` syntax below).
    uinfo = pwd.getpwnam(runas)

    if os.getuid() == uinfo.pw_uid and os.getgid() == uinfo.pw_gid:
        # No need to change user or group
        return

    # No logging can happen on this function
    #
    # 08:46:32,161 [salt.loaded.int.module.cmdmod:276 ][DEBUG   ] stderr: Traceback (most recent call last):
    #   File "/usr/lib/python2.7/logging/__init__.py", line 870, in emit
    #     self.flush()
    #   File "/usr/lib/python2.7/logging/__init__.py", line 832, in flush
    #     self.stream.flush()
    # IOError: [Errno 9] Bad file descriptor
    # Logged from file cmdmod.py, line 59
    # 08:46:17,481 [salt.loaded.int.module.cmdmod:59  ][DEBUG   ] Switching user 0 -> 1008 and group 0 -> 1012 if needed
    #
    # apparently because we closed fd's on Popen, though if not closed, output
    # would also go to it's stderr

    # Switch gid if it differs; surface failures as CommandExecutionError.
    if os.getgid() != uinfo.pw_gid:
        try:
            os.setgid(uinfo.pw_gid)
        except OSError, err:
            raise CommandExecutionError(
                'Failed to change from gid {0} to {1}. Error: {2}'.format(
                    os.getgid(), uinfo.pw_gid, err
                )
            )
    def userinfo(self, old_path=""):
        """
        Display some user information related to the filemanager functionality.
        """
        # Change the global page title:
        self.context["PAGE"].title = _(
            "Filemanager - Display some user information"
        )

        import pwd, grp

        uid = os.getuid()
        gid = os.getgid()

        pwd_info = pwd.getpwuid(os.getuid())
        grp_info = grp.getgrgid(os.getgid())

        context = {
            "filelist_link": self.URLs.methodLink(
                method_name="filelist", args=old_path
            ),
            "uid": uid,
            "gid": gid,
            "pwd_info": pwd_info,
            "grp_info": grp_info,
        }
#        self.page_msg(context)
        self._render_template("userinfo", context)#, debug=True)
Example #13
0
def drop_privileges(uid_name="nobody", gid_name="nogroup"):
    """If running as root, switch this process to the `uid_name` user.

    NOTE: only the uid is actually changed in this version -- the gid
    switch is commented out, so `gid_name` is effectively unused.
    NOTE: Python 2 only (old-style `077` octal literal below).
    """
    import pwd, grp

    starting_uid = os.getuid()
    starting_gid = os.getgid()

    starting_uid_name = pwd.getpwuid(starting_uid)[0]

    if os.getuid() != 0:
        # We're not root so, like, whatever dude
        return

    if starting_uid == 0:

        # Get the uid/gid from the name
        running_uid = pwd.getpwnam(uid_name)[2]
        # running_gid = grp.getgrnam(gid_name)[2]

        # Try setting the new uid/gid
        # os.setgid(running_gid)
        os.setuid(running_uid)

        # Restrict default permissions on newly created files.
        new_umask = 077
        old_umask = os.umask(new_umask)
        sys.stderr.write("drop_privileges: Old umask: %s, new umask: %s\n" % (oct(old_umask), oct(new_umask)))

    # Report the identity we ended up with.
    final_uid = os.getuid()
    final_gid = os.getgid()
    sys.stderr.write("drop_privileges: running as %s/%s\n" % (pwd.getpwuid(final_uid)[0], grp.getgrgid(final_gid)[0]))
def main():
    """Entry point: parse CLI options, fix up the environment and exec
    Firefox (the exec replaces this process).
    NOTE: Python 2 only (print statements)."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-b', '--bundle-id', dest='bundle_id', default=None,
                      help='Identifier of the activity bundle')
    parser.add_option('-a', '--activity-id', dest='activity_id', default=None,
                      help='Identifier of the activity instance')
    parser.add_option('-o', '--object-id', dest='object_id', default=None,
                      help='Identifier of the associated datastore object')
    parser.add_option('-u', '--uri', dest='uri', default=None,
                      help='URI to load')
    parser.add_option('--languages', action='store_true',
                      help='Print out the set of languages supported, and quit')
    (options, args) = parser.parse_args()
    if options.languages:
        # firefox substitutes - for _
        print ' '.join(l.replace('_','-') for l in LANGUAGES)
        return

    # XXX in the future we should do something with the -b/a/o args.

    # if 'constant-uid' is enabled (stable build 758 and later),
    # move $HOME down one level, to where we have persistent storage
    # NOTE(review): uid == gid is used as the constant-uid detection
    # heuristic here -- confirm that is reliable on target builds.
    if os.getuid() == os.getgid():
        os.environ['HOME'] = os.environ['SUGAR_ACTIVITY_ROOT'] + '/data'
    # sanitize LANG; firefox crashes if it sees a LANG it doesn't know.
    os.environ['LANG'] = sanitize(os.environ['LANG'])+'.utf-8'

    ff = [ './firefox' ]
    if options.uri is not None:
        ff += [ options.uri ]
    print os.getgid()
    # execl replaces the current process image; abort() is unreachable
    # unless the exec itself fails.
    os.execl(ff[0], *ff)
    os.abort() # should never reach here.
Example #15
0
def setupLogs(options):
    """Configure and return the root logger based on `options`.

    A stream handler is attached when options.trace is set, and a file
    handler when options.logfile is set; both use options.loglevel.
    The effective option values are then emitted at DEBUG level.
    """
    root_logger = logging.getLogger()

    if options.trace or options.logfile:
        level = getattr(logging, options.loglevel.upper())
        root_logger.setLevel(level)

        formatter = logging.Formatter(
            '%(asctime)s %(filename)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S')

        if options.trace:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(level)
            stream_handler.setFormatter(formatter)
            logging.getLogger('').addHandler(stream_handler)

        if options.logfile:
            file_handler = logging.FileHandler(options.logfile)
            file_handler.setLevel(level)
            file_handler.setFormatter(formatter)
            logging.getLogger('').addHandler(file_handler)

    # Log the effective configuration and identity for debugging.
    for message in (
            'workdir = {}'.format(options.workdir),
            'oedir = {}'.format(options.oedir),
            'svnloc = {}'.format(options.svnloc),
            'attemptsdir = {}'.format(options.attemptsdir),
            'uid = {} = {}'.format(os.getuid(), pwd.getpwuid(os.getuid()).pw_name),
            'euid = {} = {}'.format(os.geteuid(), pwd.getpwuid(os.geteuid()).pw_name),
            'gid = {} = {}'.format(os.getgid(), grp.getgrgid(os.getgid()).gr_name),
            'egid = {} = {}'.format(os.getegid(), grp.getgrgid(os.getegid()).gr_name)):
        root_logger.debug(message)

    return root_logger
Example #16
0
 def __init__(self, configd_dir):
     """Initialise the in-memory filesystem with '/' and an empty
     '/config', then generate the initial combined ssh config.
     NOTE: Python 2 only (old-style octal literals 0550/0400).
     """
     now = time.time()
     # configd_dir is the directory to be watched by
     # self.dir_poller from a separate thread.
     self.configd_dir = configd_dir
     # initialise the list of "files". '/' is mandatory. '/config'
     # is where our combined ssh config lives.
     with configLock:
         self.files = {
             "/": dict(
                 st_mode=(stat.S_IFDIR | 0550),
                 st_uid=os.getuid(),
                 st_gid=os.getgid(),
                 st_nlink=2,
                 st_ctime=now,
                 st_mtime=now,
                 st_atime=now,
             ),
             "/config": dict(
                 st_mode=(stat.S_IFREG | 0400),
                 st_uid=os.getuid(),
                 st_gid=os.getgid(),
                 st_size=0,
                 st_nlink=1,
                 st_ctime=now,
                 st_mtime=now,
                 st_atime=now,
             ),
         }
         self.ssh_config = ""
     # we just started up, so generate the ssh config right now.
     logger.debug("Generating initial config")
     self.generate_config()
def chugid(runas):
    '''
    Change the current process to belong to
    the imputed user (and the groups he belongs to)
    '''
    uinfo = pwd.getpwnam(runas)
    supgroups = []
    supgroups_seen = set()
    # The line below used to exclude the current user's primary gid.
    # However, when root belongs to more than one group
    # this causes root's primary group of '0' to be dropped from
    # his grouplist.  On FreeBSD, at least, this makes some
    # command executions fail with 'access denied'.
    #
    # The Python documentation says that os.setgroups sets only
    # the supplemental groups for a running process.  On FreeBSD
    # this does not appear to be strictly true.
    group_list = get_group_dict(runas, include_default=True)
    if sys.platform == 'darwin':
        # NOTE(review): this comprehension turns group_list into a *list*
        # of names, but the loop below indexes group_list by name as if it
        # were still a dict -- looks broken on darwin; confirm.
        group_list = [a for a in group_list
                      if not a.startswith('_')]
    # De-duplicate gids while preserving order (set.add returns None, so
    # `not supgroups_seen.add(gid)` is always truthy).
    for group_name in group_list:
        gid = group_list[group_name]
        if (gid not in supgroups_seen
            and not supgroups_seen.add(gid)):
            supgroups.append(gid)

    # Change gid first, while we may still have the privilege to do so.
    if os.getgid() != uinfo.pw_gid:
        try:
            os.setgid(uinfo.pw_gid)
        except OSError as err:
            logger.error(
                'Failed to change from gid {0} to {1}. Error: {2}'.format(
                    os.getgid(), uinfo.pw_gid, err
                )
            )
            sys.exit(os.EX_OSERR)

    # Set supplemental groups
    if sorted(os.getgroups()) != sorted(supgroups):
        try:
            os.setgroups(supgroups)
        except OSError as err:
            logger.error(
                'Failed to set supplemental groups to {0}. Error: {1}'.format(
                    supgroups, err
                )
            )

    # Drop uid last: after this we can no longer change gid/groups.
    if os.getuid() != uinfo.pw_uid:
        try:
            os.setuid(uinfo.pw_uid)
        except OSError as err:
            logger.error(
                'Failed to change from uid {0} to {1}. Error: {2}'.format(
                    os.getuid(), uinfo.pw_uid, err
                )
            )
            sys.exit(os.EX_OSERR)
Example #18
0
 def test_chown(self):
     """chown must fail on a missing file and succeed on an existing one."""
     posix = self.posix
     target = self.path
     posix.unlink(target)
     # The file is gone, so chown has to raise OSError.
     raises(OSError, posix.chown, target, posix.getuid(), posix.getgid())
     with open(target, "w") as fobj:
         fobj.write("this is a test")
     # Now the file exists again; chown to our own uid/gid must succeed.
     posix.chown(target, posix.getuid(), posix.getgid())
Example #19
0
 def test_uid(self):
     """Files created via touch() carry the invoking user's uid/gid."""
     # We probably aren't root, so use our own user and group names.
     uid = os.getuid()
     gid = os.getgid()
     touch(self.tmppath('foo'),
           user=pwd.getpwuid(uid)[0],
           group=grp.getgrgid(gid)[0])
     info = os.stat(self.tmppath('foo'))
     self.assertEqual(info.st_uid, uid)
     self.assertEqual(info.st_gid, gid)
Example #20
0
def dropPrivileges(uid, gid):
  """Drop privileges to uid, gid if current uid is 0

  Do tests to check if dropping was successful and that no system call is able
  to re-raise dropped privileges

  Does nothing in case if uid and gid are not 0
  """
  logger = logging.getLogger('dropPrivileges')
  # XXX-Cedric: remove format / just do a print, otherwise formatting is done
  # twice
  current_uid, current_gid = os.getuid(), os.getgid()
  # Refuse to "drop" to root -- that would be a no-op at best.
  if uid == 0 or gid == 0:
    raise OSError('Dropping privileges to uid = %r or ' \
                                      'gid = %r is too dangerous' % (uid, gid))
  if not(current_uid == 0 and current_gid == 0):
    logger.debug('Running as uid = %r, gid = %r, dropping not needed and not '
        'possible' % (current_uid, current_gid))
    return
  # drop privileges: groups first, then gid, then uid (the reverse order
  # would lose the privilege needed for the earlier calls).
  user_name = pwd.getpwuid(uid)[0]
  group_list = set([x.gr_gid for x in grp.getgrall() if user_name in x.gr_mem])
  group_list.add(gid)
  os.initgroups(pwd.getpwuid(uid)[0], gid)
  os.setgid(gid)
  os.setuid(uid)

  # assert that privileges are dropped
  message_pre = 'After dropping to uid = %r and gid = %r ' \
                'and group_list = %s' % (
                            uid, gid, group_list)
  new_uid, new_gid, new_group_list = os.getuid(), os.getgid(), os.getgroups()
  if not (new_uid == uid and new_gid == gid and set(new_group_list) == group_list):
    raise OSError('%s new_uid = %r and new_gid = %r and ' \
                                      'new_group_list = %r which is fatal.'
                                      % (message_pre,
                                         new_uid,
                                         new_gid,
                                         new_group_list))

  # assert that it is not possible to go back to running one
  # (every setuid/setgid/setgroups attempt must raise OSError; if all of
  # them raise, the innermost `raise` is swallowed by the outer handler
  # and we fall through.  If any call *succeeds*, control reaches the
  # `else` clause and we abort.)
  try:
    try:
      os.setuid(current_uid)
    except OSError:
      try:
        os.setgid(current_gid)
      except OSError:
        try:
          os.setgroups([current_gid])
        except OSError:
          raise
  except OSError:
    pass
  else:
    raise ValueError('%s it was possible to go back to uid = %r and gid = '
        '%r which is fatal.' % (message_pre, current_uid, current_gid))
  logger.debug('Succesfully dropped privileges to uid=%r gid=%r' % (uid, gid))
Example #21
0
 def change_permissions(self):
     """Hand the log/pid files to the configured group and user, then
     drop this process's gid and uid accordingly."""
     group = self.config['group']
     if group:
         # Re-own the files before switching gid.
         helper.chown(self.config['log_file'], os.getuid(), group)
         helper.chown(self.config['pid_file'], os.getuid(), group)
         helper.set_gid(group)
     user = self.config['user']
     if user:
         # Re-own the files before switching uid.
         helper.chown(self.config['log_file'], user, os.getgid())
         helper.chown(self.config['pid_file'], user, os.getgid())
         helper.set_uid(user)
Example #22
0
def change_permissions():
    """Hand the log/pid files to the group and user from the global
    CONFIG, then drop this process's gid and uid accordingly."""
    group = CONFIG['group']
    if group:
        # Re-own both files before switching gid.
        for path in (CONFIG['log_file'], CONFIG['pid_file']):
            helper.chown(path, os.getuid(), group)
        helper.set_gid(group)
    user = CONFIG['user']
    if user:
        # Re-own both files before switching uid.
        for path in (CONFIG['log_file'], CONFIG['pid_file']):
            helper.chown(path, user, os.getgid())
        helper.set_uid(user)
Example #23
0
    def run(self):
        """Main loop of the proxy process: switch to the target uid/gid,
        wire up the client/server protocols and pump packets until stopped.
        NOTE: Python 2 only (`except KeyboardInterrupt, e` syntax).
        """
        debug("ProxyProcess.run() pid=%s, uid=%s, gid=%s", os.getpid(), os.getuid(), os.getgid())
        #change uid and gid:
        if os.getgid()!=self.gid:
            os.setgid(self.gid)
        if os.getuid()!=self.uid:
            os.setuid(self.uid)
        debug("ProxyProcess.run() new uid=%s, gid=%s", os.getuid(), os.getgid())

        if self.env_options:
            #TODO: whitelist env update?
            os.environ.update(self.env_options)

        log.info("new proxy started for client %s and server %s", self.client_conn, self.server_conn)

        # Quit cleanly on SIGTERM/SIGINT.
        signal.signal(signal.SIGTERM, self.signal_quit)
        signal.signal(signal.SIGINT, self.signal_quit)
        debug("registered signal handler %s", self.signal_quit)

        make_daemon_thread(self.server_message_queue, "server message queue").start()

        self.main_queue = Queue()
        #setup protocol wrappers:
        self.server_packets = Queue(PROXY_QUEUE_SIZE)
        self.client_packets = Queue(PROXY_QUEUE_SIZE)
        self.client_protocol = Protocol(self, self.client_conn, self.process_client_packet, self.get_client_packet)
        self.client_protocol.restore_state(self.client_state)
        self.server_protocol = Protocol(self, self.server_conn, self.process_server_packet, self.get_server_packet)
        #server connection tweaks:
        self.server_protocol.large_packets.append("draw")
        self.server_protocol.large_packets.append("window-icon")
        self.server_protocol.large_packets.append("keymap-changed")
        self.server_protocol.large_packets.append("server-settings")
        self.server_protocol.set_compression_level(self.session_options.get("compression_level", 0))

        # Background thread that encodes queued draw packets.
        self.lost_windows = set()
        self.encode_queue = Queue()
        self.encode_thread = make_daemon_thread(self.encode_loop, "encode")
        self.encode_thread.start()

        debug("starting network threads")
        self.server_protocol.start()
        self.client_protocol.start()

        #forward the hello packet:
        hello_packet = ("hello", self.filter_client_caps(self.caps))
        self.queue_server_packet(hello_packet)

        # Block in the queue loop; Ctrl-C triggers an orderly stop.
        try:
            try:
                self.run_queue()
            except KeyboardInterrupt, e:
                self.stop(str(e))
        finally:
            debug("ProxyProcess.run() ending %s", os.getpid())
Example #24
0
 def does_stuff():
     # xxx not really a test, just checks that they are callable
     me = (os.getuid(), os.getgid())
     os.chown(tmpfile1, *me)
     os.lchown(tmpfile1, *me)
     os.lchown(tmpfile2, *me)
     # tmpfile2 is a broken symlink: chown (which follows links) must fail.
     try:
         os.chown(tmpfile2, *me)
     except OSError:
         pass
     else:
         raise AssertionError("os.chown(broken symlink) should raise")
Example #25
0
File: cmdmod.py Project: hulu/salt
def _chugid(runas):
    """Switch this process's gid, supplementary groups and uid to those
    of `runas`, raising CommandExecutionError on any failure."""
    uinfo = pwd.getpwnam(runas)
    supgroups_seen = set()
    # Supplementary groups: every group listing the user as a member,
    # excluding the primary group, de-duplicated while preserving order
    # (set.add returns None, so `not supgroups_seen.add(...)` is truthy).
    supgroups = [
        g.gr_gid for g in grp.getgrall()
        if uinfo.pw_name in g.gr_mem and g.gr_gid != uinfo.pw_gid
        and g.gr_gid not in supgroups_seen and not supgroups_seen.add(g.gr_gid)
    ]

    # No logging can happen on this function
    #
    # 08:46:32,161 [salt.loaded.int.module.cmdmod:276 ][DEBUG   ] stderr: Traceback (most recent call last):
    #   File "/usr/lib/python2.7/logging/__init__.py", line 870, in emit
    #     self.flush()
    #   File "/usr/lib/python2.7/logging/__init__.py", line 832, in flush
    #     self.stream.flush()
    # IOError: [Errno 9] Bad file descriptor
    # Logged from file cmdmod.py, line 59
    # 08:46:17,481 [salt.loaded.int.module.cmdmod:59  ][DEBUG   ] Switching user 0 -> 1008 and group 0 -> 1012 if needed
    #
    # apparently because we closed fd's on Popen, though if not closed, output
    # would also go to its stderr

    # Change gid first, while we may still have the privilege to do so.
    if os.getgid() != uinfo.pw_gid:
        try:
            os.setgid(uinfo.pw_gid)
        except OSError as err:
            raise CommandExecutionError(
                'Failed to change from gid {0} to {1}. Error: {2}'.format(
                    os.getgid(), uinfo.pw_gid, err
                )
            )

    # Set supplemental groups
    if sorted(os.getgroups()) != sorted(supgroups):
        try:
            os.setgroups(supgroups)
        except OSError as err:
            raise CommandExecutionError(
                'Failed to set supplemental groups to {0}. Error: {1}'.format(
                    supgroups, err
                )
            )

    # Drop uid last: after this we can no longer change gid/groups.
    if os.getuid() != uinfo.pw_uid:
        try:
            os.setuid(uinfo.pw_uid)
        except OSError as err:
            raise CommandExecutionError(
                'Failed to change from uid {0} to {1}. Error: {2}'.format(
                    os.getuid(), uinfo.pw_uid, err
                )
            )
Example #26
0
 def terminated(self):
     """Post-process the job's download directory.

     Rearranges output files so they are easier to use:
       1. All '.fasc'/'.sc' score files are extracted next to the input
          '.pdb' file, renamed '<input>.<jobname><ext>'.
       2. If self.collect is set, generated '.pdb'/'.pdb.gz' decoys are
          collected into a single '<input>.decoys.tar' archive, re-owned
          by the current user/group.
       3. Anything else stays inside the output tarball.
     Sets self.info when the expected output tarball is missing.
     """
     output_dir = self.output_dir
     # work directory is the parent of the download directory
     work_dir = os.path.dirname(output_dir)
     input_name = os.path.basename(self.pdb_file_path)
     input_name_sans = os.path.splitext(input_name)[0]
     output_tar_filename = os.path.join(output_dir, 'docking_protocol.tar.gz')
     if not os.path.exists(output_tar_filename):
         # no `docking_protocol.tar.gz` file
         self.info = ("No 'docking_protocol.tar.gz' file found.")
         return

     # BUG FIX: the original leaked both tar files when an error occurred
     # mid-loop (and never closed output_tar at all); use a context
     # manager plus try/finally so they are always closed.
     pdbs = None
     try:
         with tarfile.open(output_tar_filename, 'r:gz') as output_tar:
             # single tar file holding all decoy .PDB files
             pdbs_tarfile_path = os.path.join(work_dir, input_name_sans) + '.decoys.tar'
             if self.collect:
                 mode = 'a' if os.path.exists(pdbs_tarfile_path) else 'w'
                 pdbs = tarfile.open(pdbs_tarfile_path, mode)
             for entry in output_tar:
                 if entry.name.endswith('.fasc') or entry.name.endswith('.sc'):
                     # Score file: extract next to the input '.pdb' file.
                     extension = os.path.splitext(entry.name)[1]
                     scoring_file_name = (os.path.join(work_dir, input_name_sans)
                                          + '.' + self.jobname + extension)
                     src = output_tar.extractfile(entry)
                     with open(scoring_file_name, 'wb') as dst:
                         dst.write(src.read())
                     src.close()
                 elif (self.collect and
                       (entry.name.endswith('.pdb.gz') or entry.name.endswith('.pdb'))):
                     # Decoy: copy the entry into the combined archive,
                     # re-owned by the current user/group.
                     src = output_tar.extractfile(entry)
                     dst = tarfile.TarInfo(entry.name)
                     dst.size = entry.size
                     dst.type = entry.type
                     dst.mode = entry.mode
                     dst.mtime = entry.mtime
                     dst.uid = os.getuid()
                     dst.gid = os.getgid()
                     dst.uname = pwd.getpwuid(os.getuid()).pw_name
                     dst.gname = grp.getgrgid(os.getgid()).gr_name
                     if hasattr(entry, 'pax_headers'):
                         dst.pax_headers = entry.pax_headers
                     pdbs.addfile(dst, src)
                     src.close()
     finally:
         if pdbs is not None:
             pdbs.close()
Example #27
0
def test_valid_specifiers(*, user):
    """Exercise tmpfiles '%' specifier expansion against known-good values."""
    test_content('f {} - - - - two words', 'two words', user=user)

    if id128:
        try:
            test_content('f {} - - - - %m', str(id128.get_machine().hex), user=user)
        except AssertionError as err:
            # Diagnose the mismatch before skipping: dump the inputs %m is
            # derived from.
            print(err)
            print('/etc/machine-id: {!r}'.format(open('/etc/machine-id').read()))
            print('/proc/cmdline: {!r}'.format(open('/proc/cmdline').read()))
            print('skipping')
        test_content('f {} - - - - %b', str(id128.get_boot().hex), user=user)

    test_content('f {} - - - - %H', str(socket.gethostname()), user=user)
    test_content('f {} - - - - %v', str(os.uname().release), user=user)
    test_content('f {} - - - - %U', str(os.getuid()), user=user)
    test_content('f {} - - - - %G', str(os.getgid()), user=user)

    test_content('f {} - - - - %u', str(pwd.getpwuid(os.getuid()).pw_name), user=user)

    test_content('f {} - - - - %g', str(grp.getgrgid(os.getgid()).gr_name), user=user)

    # Note that %h is the only specifier in which we look the environment,
    # because we check $HOME. Should we even be doing that?
    test_content('f {} - - - - %h', str(os.path.expanduser("~")), user=user)

    runtime_dir = os.getenv('XDG_RUNTIME_DIR')
    if not user or runtime_dir is not None:
        test_content('f {} - - - - %t',
                     runtime_dir if user else '/run',
                     user=user)

    config_home = os.getenv('XDG_CONFIG_HOME')
    if not user or config_home is not None:
        test_content('f {} - - - - %S',
                     config_home if user else '/var/lib',
                     user=user)

    cache_home = os.getenv('XDG_CACHE_HOME')
    if not user or cache_home is not None:
        test_content('f {} - - - - %C',
                     cache_home if user else '/var/cache',
                     user=user)

    # NOTE(review): %L is gated on XDG_CONFIG_HOME here (mirroring the
    # original) -- confirm that is the intended environment variable.
    if not user or config_home is not None:
        test_content('f {} - - - - %L',
                     config_home + '/log' if user else '/var/log',
                     user=user)

    test_content('f {} - - - - %%', '%', user=user)
Example #28
0
 def getattr(self, path):
     """FUSE getattr handler: build a ``fuse.Stat`` for *path* from the DIRAC
     File Catalog listing of its parent directory.

     Owners/groups not matching the current proxy identity are presented as
     uid/gid 65534 ('nobody'). Returns ``-errno.ENOENT`` when the catalog
     does not know *path*.
     """
     p = os.path.dirname(path)
     from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
     # One catalog round-trip per call; the reply describes every entry in p.
     self.result = FileCatalogClient().listDirectory(p,True)
     st = fuse.Stat()
     #print "+++++++++++++++++++++ "+self.SE
     #print p
     #print self.proxy
     #print self.result['Value']['Successful'][p]
     if path=='/':
         # Synthetic root directory. NOTE: 0755 is Python 2 octal syntax
         # (would be 0o755 on Python 3).
         st.st_mode = (stat.S_IFDIR | 0755)
         st.st_ino = 0
         st.st_dev = 0
         st.st_nlink = 2
         st.st_uid = os.getuid()
         st.st_gid = os.getgid()
         st.st_size = 4096
         st.st_atime = time.time()
         st.st_mtime = time.time()
         st.st_ctime = time.time()
     elif self.result["OK"] and self.result['Value']['Successful'][p]['Files'].get(path):
         # Regular file known to the catalog: translate its MetaData record.
         md = self.result['Value']['Successful'][p]['Files'][path]['MetaData']
         st.st_mode = (stat.S_IFREG | md['Mode'])
         st.st_ino = 0
         st.st_dev = 0
         st.st_nlink = 1
         # Claim ownership only when the proxy identity matches the record.
         st.st_uid = os.getuid() if md['Owner']==self.proxy['Value']['username'] else 65534
         st.st_gid = os.getgid() if md['OwnerGroup']==self.proxy['Value']['group'] else 65534
         st.st_size = md['Size']
         # The catalog exposes no atime; reuse the modification date.
         st.st_atime = time.mktime(md['ModificationDate'].timetuple())
         st.st_mtime = time.mktime(md['ModificationDate'].timetuple())
         st.st_ctime = time.mktime(md['CreationDate'].timetuple())
     elif self.result["OK"] and (self.result['Value']['Successful'][p]['SubDirs'].get('/'+path) or self.result['Value']['Successful'][p]['SubDirs'].get(path)):
         #md = self.result['Value']['Successful'][p]['SubDirs']['/'+path] if p=='/' else self.result['Value']['Successful'][p]['SubDirs'][path]
         # change to DIRAC version v6r12p16
         md = self.result['Value']['Successful'][p]['SubDirs'][path]
         st.st_mode = (stat.S_IFDIR | md['Mode'])
         st.st_ino = 0
         st.st_dev = 0
         st.st_nlink = 2
         st.st_uid = os.getuid() if md['Owner']==self.proxy['Value']['username'] else 65534
         st.st_gid = os.getgid() if md['OwnerGroup']==self.proxy['Value']['group'] else 65534
         st.st_size = 4096
         st.st_atime = time.mktime(md['ModificationDate'].timetuple())
         st.st_mtime = time.mktime(md['ModificationDate'].timetuple())
         st.st_ctime = time.mktime(md['CreationDate'].timetuple())
     else :
         # Neither a file nor a subdirectory entry under p.
         return -errno.ENOENT
     return st
Example #29
0
def import_portage():
	"""Import and return the portage module with legacy globals disabled,
	adjusting portage's group expectations for non-privileged runs."""
	try:
		from portage import data as portage_data
	except ImportError:
		import portage_data
	# If we're not already root or in the portage group, we make the gid of the
	# current process become portage_gid.
	# NOTE(review): this compares the *gid* to 0 as the privilege check --
	# confirm whether os.getuid() was intended.
	in_portage_group = portage_data.portage_gid in os.getgroups()
	if os.getgid() != 0 and not in_portage_group:
		portage_data.portage_gid = os.getgid()
		portage_data.secpass = 1

	os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
	import portage
	os.environ.pop("PORTAGE_LEGACY_GLOBALS")
	return portage
Example #30
0
def main(argv):
  """Demo driver (Python 2): run a couple of callables as user 'udoprog'
  via doas() and report their exit status and our own ids."""
  def echo(message):
    print(message)

  account = pwd.getpwnam("udoprog")

  pid, status = doas(account.pw_uid, account.pw_gid, echo, "Hello World")
  print(status)
  print(os.getgid())
  print(os.getuid())

  # Expected to fail: /test.txt is not writable for an unprivileged user.
  pid, status = doas(account.pw_uid, account.pw_gid, open, "/test.txt", "w")

  return 0
Example #31
0
    def _install_indv_pkg(self, pkg_name, pkg_file):
        '''
        Install one individual package.

        Validates the embedded FORMULA, refuses to overwrite existing files
        unless ``force`` is set, runs the optional pre/post local and targeted
        state scripts, and registers the package plus every installed file in
        the package database.
        '''
        self.ui.status('... installing {0}'.format(pkg_name))
        formula_tar = tarfile.open(pkg_file, 'r:bz2')
        formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name))
        formula_def = yaml.safe_load(formula_ref)

        # A FORMULA missing any of these fields is not installable
        for field in ('version', 'release', 'summary', 'description'):
            if field not in formula_def:
                raise SPMPackageError(
                    'Invalid package: the {0} was not found'.format(field))

        pkg_files = formula_tar.getmembers()

        # First pass: check for files that already exist
        existing_files = self._pkgfiles_fun('check_existing', pkg_name,
                                            pkg_files, formula_def)

        if existing_files and not self.opts['force']:
            raise SPMPackageError(
                'Not installing {0} due to existing files:\n\n{1}'.format(
                    pkg_name, '\n'.join(existing_files)))

        # We've decided to install
        self._pkgdb_fun('register_pkg', pkg_name, formula_def, self.db_conn)

        # Run the pre_local_state script, if present
        if 'pre_local_state' in formula_def:
            log.debug('Executing pre_local_state script')
            high_data = self._render(formula_def['pre_local_state'],
                                     formula_def)
            self.caller.cmd('state.high', data=high_data)
        if 'pre_tgt_state' in formula_def:
            log.debug('Executing pre_tgt_state script')
            high_data = self._render(formula_def['pre_tgt_state']['data'],
                                     formula_def)
            self.client.run_job(
                tgt=formula_def['pre_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['pre_tgt_state'].get('tgt_type', 'glob'),
                # fixed: was misspelled 'timout', which run_job's **kwargs
                # silently swallowed, so the configured timeout never applied
                timeout=self.opts['timeout'],
                data=high_data,
            )

        # No defaults for this in config.py; default to the current running
        # user and group
        import salt.utils
        if salt.utils.is_windows():
            import salt.utils.win_functions
            uname = gname = salt.utils.win_functions.get_current_user()
            uname_sid = salt.utils.win_functions.get_sid_from_name(uname)
            uid = self.opts.get('spm_uid', uname_sid)
            gid = self.opts.get('spm_gid', uname_sid)
        else:
            uid = self.opts.get('spm_uid', os.getuid())
            gid = self.opts.get('spm_gid', os.getgid())
            uname = pwd.getpwuid(uid)[0]
            gname = grp.getgrgid(gid)[0]

        # Second pass: install the files, stamped with the ownership above
        for member in pkg_files:
            member.uid = uid
            member.gid = gid
            member.uname = uname
            member.gname = gname

            out_path = self._pkgfiles_fun('install_file', pkg_name,
                                          formula_tar, member, formula_def,
                                          self.files_conn)
            if out_path is not False:
                if member.isdir():
                    # directories carry no content hash
                    digest = ''
                else:
                    self._verbose(
                        'Installing file {0} to {1}'.format(
                            member.name, out_path), log.trace)
                    file_hash = hashlib.sha1()
                    digest = self._pkgfiles_fun(
                        'hash_file', os.path.join(out_path, member.name),
                        file_hash, self.files_conn)
                self._pkgdb_fun('register_file', pkg_name, member, out_path,
                                digest, self.db_conn)

        # Run the post_local_state script, if present
        if 'post_local_state' in formula_def:
            log.debug('Executing post_local_state script')
            high_data = self._render(formula_def['post_local_state'],
                                     formula_def)
            self.caller.cmd('state.high', data=high_data)
        if 'post_tgt_state' in formula_def:
            log.debug('Executing post_tgt_state script')
            high_data = self._render(formula_def['post_tgt_state']['data'],
                                     formula_def)
            self.client.run_job(
                tgt=formula_def['post_tgt_state']['tgt'],
                fun='state.high',
                tgt_type=formula_def['post_tgt_state'].get('tgt_type', 'glob'),
                # fixed: was misspelled 'timout' (see pre_tgt_state above)
                timeout=self.opts['timeout'],
                data=high_data,
            )

        formula_tar.close()
Example #32
0
# Permission predicates over os.access().
writable_p = partial(access, mode=os.W_OK)
# Fixed: os.X_OK probes execute permission; the original used os.EX_OK,
# which is the process exit status 0, so access(f, os.EX_OK) degenerated
# to a mere existence (F_OK) check.
executable_p = partial(access, mode=os.X_OK)

def owned_by_user(user_id, f):
    """True iff *f* (not following symlinks) is owned by uid *user_id*."""
    owner = os.lstat(f).st_uid
    return owner == user_id

def owned_by_group(group_id, f):
    """True iff *f* (not following symlinks) has group ownership *group_id*."""
    group = os.lstat(f).st_gid
    return group == group_id

# uid/gid of the unprivileged 'nobody' account, resolved once at import time.
# NOTE(review): the gid comes from the passwd entry (getpwnam), not from a
# group-database lookup -- confirm that is intended on systems where
# nobody's group is 'nogroup'.
nobody_uid = getpwnam('nobody').pw_uid
nobody_gid = getpwnam('nobody').pw_gid

# Ready-made single-argument ownership predicates.
owned_by_nobody_p = partial(owned_by_user, nobody_uid)
owned_by_nobody_group_p = partial(owned_by_group, nobody_gid)
owned_by_me_p = partial(owned_by_user, os.getuid())
owned_by_my_group_p = partial(owned_by_group, os.getgid())

def time_since(action, delta, f):
    """True iff *f*'s timestamp for *action* ('modification', 'creation' or
    'access') lies within *delta* of now."""
    attr_by_action = {'modification': 'st_mtime',
                      'creation': 'st_ctime',
                      'access': 'st_atime'}
    stamp = getattr(os.lstat(f), attr_by_action[action])
    deadline = datetime.fromtimestamp(stamp) + delta
    return datetime.now() < deadline

# Convenience predicates: was the file accessed/modified/created in the
# last hour?
accessed_since_60minutes_filep = partial(time_since, 'access', timedelta(minutes=60))
modified_since_60minutes_filep = partial(time_since, 'modification', timedelta(minutes=60))
created_since_60minutes_filep = partial(time_since, 'creation', timedelta(minutes=60))

# File extensions considered worth reporting (config, source, logs, dumps...).
interesting_extensions = ['.' + e for e in 'cfg rtf config txt c pl gz bz2 7z log tar tgz sql properties xml'.split()]

def interesting_filep(fname):
    """True iff *fname* carries one of the interesting extensions
    (compared case-insensitively)."""
    _, extension = os.path.splitext(fname)
    return extension.lower() in interesting_extensions
Example #33
0
    def __init__(self):

        if GLSettingsClass.initialized:
            error_msg = "Singleton GLSettingClass instanced twice!"
            raise Exception(error_msg)
        else:
            GLSettingsClass.initialized = True

        # command line parsing utils
        self.parser = OptionParser()
        self.cmdline_options = None

        # version
        self.version_string = __version__

        # daemon
        self.nodaemon = False

        # threads sizes
        self.db_thread_pool_size = 1

        self.bind_addresses = '127.0.0.1'

        # bind port
        self.bind_port = 8082

        # store name
        self.store_name = 'main_store'

        # Database variables for MYSQL
        self.db_username = '******'
        self.db_password = '******'
        self.db_hostname = 'localhost'
        # Can either be sqlite or mysql
        self.db_type = 'sqlite'
        # Database version tracking
        self.db_version = DATABASE_VERSION

        # debug defaults
        self.storm_debug = False
        self.http_log = -1
        self.http_log_counter = 0
        self.loglevel = "CRITICAL"

        # files and paths
        self.root_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..'))
        self.pid_path = '/var/run/globaleaks'
        self.working_path = '/var/globaleaks'
        self.static_source = '/usr/share/globaleaks/glbackend'
        self.glclient_path = '/usr/share/globaleaks/glclient'
        self.ramdisk_path = '/dev/shm/globaleaks'
        if not os.path.isdir(self.ramdisk_path):
            self.ramdisk_path = tempfile.mkdtemp()

        # list of plugins available in the software
        self.notification_plugins = [
            'MailNotification',
        ]

        # session tracking, in the singleton classes
        self.sessions = dict()
        self.failed_login_attempts = 0  # statisticals, referred to latest_period
        # and resetted by session_management sched

        # download tocken trackin
        self.download_tokens = dict()

        # static file rules
        self.staticfile_regexp = r'(.*)'
        self.staticfile_overwrite = False
        self.images_extensions = (".jpg", ".jpeg", ".png", ".gif")
        self.css_extensions = ".css"
        self.reserved_names = OD()
        self.reserved_names.logo = "globaleaks_logo"
        self.reserved_names.css = "custom_stylesheet"

        # acceptable 'Host:' header in HTTP request
        self.accepted_hosts = "127.0.0.1,localhost"

        # default timings for scheduled jobs
        self.session_management_minutes_delta = 1  # runner.py function expects minutes
        self.cleaning_hours_delta = 6  # runner.py function expects hours
        self.notification_minutes_delta = 2  # runner.py function expects minutes
        self.delivery_seconds_delta = 20  # runner.py function expects seconds
        self.anomaly_seconds_delta = 30  # runner.py function expects seconds
        self.stats_minutes_delta = 10  # runner.py function expects minutes
        self.pgp_check_hours_delta = 24  # runner.py function expects hours

        self.www_form_urlencoded_maximum_size = 1024

        self.defaults = OD()
        # Default values, used to initialize DB at the first start,
        # or whenever the value is not supply by client.
        # These value are then stored in the single instance
        # (Node, Receiver or Context) and then can be updated by
        # the admin using the Admin interface (advanced settings)
        self.defaults.allow_unencrypted = False
        self.defaults.tor2web_admin = False
        self.defaults.tor2web_submission = False
        self.defaults.tor2web_receiver = False
        self.defaults.tor2web_unauth = True
        self.defaults.anomaly_checks = False
        self.defaults.maximum_namesize = 128
        self.defaults.maximum_textsize = 4096
        self.defaults.maximum_filesize = 30  # expressed in megabytes
        self.defaults.exception_email = u"*****@*****.**"
        # Context dependent values:
        self.defaults.receipt_regexp = u'[0-9]{16}'
        self.defaults.tip_seconds_of_life = (3600 * 24) * 15
        self.defaults.submission_seconds_of_life = (3600 * 24) * 3
        self.defaults.languages_enabled = ['en']

        self.memory_copy = OD()
        # Some operation, like check for maximum file, can't access
        # to the DB every time. So when some Node values are updated
        # here are copied, in order to permit a faster comparison
        self.memory_copy.maximum_filesize = self.defaults.maximum_filesize
        self.memory_copy.maximum_textsize = self.defaults.maximum_textsize
        self.memory_copy.maximum_namesize = self.defaults.maximum_namesize
        self.memory_copy.allow_unencrypted = self.defaults.allow_unencrypted
        self.memory_copy.tor2web_admin = self.defaults.tor2web_admin
        self.memory_copy.tor2web_submission = self.defaults.tor2web_submission
        self.memory_copy.tor2web_receiver = self.defaults.tor2web_receiver
        self.memory_copy.tor2web_unauth = self.defaults.tor2web_unauth
        self.memory_copy.anomaly_checks = self.defaults.anomaly_checks
        self.memory_copy.exception_email = self.defaults.exception_email
        # updated by globaleaks/db/__init__.import_memory_variables
        self.memory_copy.default_language = 'en'
        self.memory_copy.notif_server = None
        self.memory_copy.notif_port = None
        self.memory_copy.notif_username = None
        self.memory_copy.notif_security = None
        # import_memory_variables is called after create_tables and node+notif updating

        self.anomalies_counter = dict(external_counted_events)
        # this dict keep track of some 'external' events and is
        # cleaned periodically (10 minutes in stats)
        self.anomalies_list = []
        # this is the collection of the messages shall be reported to the admin
        self.anomalies_messages = []
        # maximum amount of element riported by /admin/anomalies and /admin/stats
        self.anomalies_report_limit = 20

        # Default delay threshold
        self.delay_threshold = 0.800

        # a dict to keep track of the lifetime of the session. at the moment
        # not exported in the UI.
        # https://github.com/globaleaks/GlobaLeaks/issues/510
        self.defaults.lifetimes = {
            'admin': (60 * 60),
            'receiver': (60 * 60),
            'wb': (60 * 60)
        }

        # unchecked_tor_input contains information that cannot be validated now
        # due to complex inclusions or requirements. Data is used in
        # globaleaks.db.datainit.apply_cli_options()
        self.unchecked_tor_input = {}

        # SOCKS default
        self.socks_host = "127.0.0.1"
        self.socks_port = 9050
        self.tor_socks_enable = True

        # https://github.com/globaleaks/GlobaLeaks/issues/647
        # we've struck a notification settings in a server, due to an
        # error looping thru email. A temporary way to disable mail
        # is put here. A globaleaks restart cause the email to restart.
        self.notification_temporary_disable = False
        self.notification_limit = 30

        self.user = getpass.getuser()
        self.group = getpass.getuser()
        self.uid = os.getuid()
        self.gid = os.getgid()
        self.start_clean = False
        self.devel_mode = False
        self.skip_wizard = False
        self.glc_path = None

        # Number of failed login enough to generate an alarm
        self.failed_login_alarm = 5

        # Number of minutes in which a user is prevented to login in case of triggered alarm
        self.failed_login_block_time = 5

        # Size in bytes of every log file. Once this size is reached the
        # logfile is rotated.
        # Default: 1M
        self.log_file_size = 1000000
        # Number of log files to conserve.
        self.maximum_rotated_log_files = 100

        # Disk file encryption in realtime
        # if the key is fine or is not.
        # this key permit Globaleaks to resist on application restart
        # not to a reboot! (is written in GLSetting.
        # key is initialized and stored in key path.
        # key_id contains an identifier of the key (when system reboots,
        # key changes.
        ### you can read more about this security measure in the document:
        ### https://github.com/globaleaks/GlobaLeaks/wiki/Encryption
        self.AES_key_size = 32
        # This key_id is just to identify the keys, and is generated with
        self.AES_key_id_regexp = u'[A-Za-z0-9]{16}'
        self.AES_counter_nonce = 128 / 8
        self.AES_file_regexp = r'(.*)\.aes'
        self.AES_file_regexp_comp = re.compile(self.AES_file_regexp)
        self.AES_keyfile_prefix = "aeskey-"

        self.exceptions = {}

        # Extreme debug option triggered by --XXX, that's are the defaults
        self.debug_option_in_the_future = 0
        self.debug_option_UUID_human = ""
        self.debug_UUID_human_counter = 0
        self.debug_option_mlockall = False
 def GetPrimaryGid():
     """Return the real (primary) group id of the current process."""
     return os.getgid()
Example #35
0
# values may save on connection overhead and latency.
conn_bufsize = 393216

# This is used in the CacheCollatedPostProcess and MiscIterToFile
# classes.  The number represents the number of rpaths which may be
# stuck in buffers when moving over a remote connection.
pipeline_max_length = 500

# True if script is running as a server
server = None

# uid and gid of the owner of the rdiff-backup process.  This can
# vary depending on the connection.
# NOTE: os.getuid/os.getgid/os.getgroups are absent on some platforms
# (e.g. native Windows), hence the AttributeError fallback to root-like
# defaults.
try:
    process_uid = os.getuid()
    process_gid = os.getgid()
    # primary gid first, then the supplementary groups
    process_groups = [process_gid] + os.getgroups()
except AttributeError:
    process_uid = 0
    process_gid = 0
    process_groups = [0]

# If true, when copying attributes, also change target's uid/gid
change_ownership = None

# If true, change the permissions of unwriteable mirror files
# (such as directories) so that they can be written, and then
# change them back.  This defaults to 1 just in case the process
# is not running as root (root doesn't need to change
# permissions).
change_mirror_perms = (process_uid != 0)
Example #36
0
    def __init__(self):
        """Populate every GLSettings default: paths, bind addresses, limits,
        crypto parameters and the identity of the running process."""
        # command line parsing utils
        self.parser = OptionParser()
        self.cmdline_options = None

        # version
        self.version_string = __version__

        # testing
        # This variable is to be able to hook/bypass code when unit-tests are run
        self.testing = False

        # daemonize the process
        self.nodaemon = False

        self.bind_address = '0.0.0.0'
        self.bind_remote_ports = [80, 443]
        self.bind_local_ports = [8082, 8083]

        self.db_type = 'sqlite'

        # debug defaults
        self.orm_debug = False

        # files and paths
        self.src_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..'))
        self.backend_script = os.path.abspath(
            os.path.join(self.src_path, 'globaleaks/backend.py'))

        self.pid_path = '/var/run/globaleaks'
        self.working_path = '/var/globaleaks'

        # TODO(bug-fix-italian-style) why is this set to the 2nd entry in the possible
        # client paths...? please fix.
        self.client_path = '/usr/share/globaleaks/client'
        for path in possible_client_paths:
            if os.path.exists(path):
                self.client_path = path
                break

        self.authentication_lifetime = 3600

        self.accept_submissions = True

        # statistical, referred to latest period
        # and resetted by session_management sched
        self.failed_login_attempts = 0

        # static file rules
        self.staticfile_regexp = r'(.*)'
        self.staticfile_overwrite = False

        self.local_hosts = ['127.0.0.1', 'localhost']

        self.onionservice = None

        # Default request time uniform value
        self.side_channels_guard = 150

        # SOCKS default
        self.socks_host = "127.0.0.1"
        self.socks_port = 9050

        self.key_bits = 4096
        self.csr_sign_bits = 512

        self.notification_limit = 30
        self.jobs_operation_limit = 20

        self.user = getpass.getuser()
        self.group = getpass.getuser()

        # Initialize to None since Windows doesn't have a direct 1:1 concept
        # of uid/gid.
        self.uid = None
        self.gid = None

        if platform.system() != 'Windows':
            self.uid = os.getuid()
            self.gid = os.getgid()

        self.devel_mode = False
        self.disable_swap = False
        self.enable_csp = True

        # Number of failed login enough to generate an alarm
        self.failed_login_alarm = 5

        # Number of minutes in which a user is prevented to login in case of triggered alarm
        self.failed_login_block_time = 5

        # Limit for log sizes and number of log files
        # https://github.com/globaleaks/GlobaLeaks/issues/1578
        self.log_size = 10000000  # 10MB
        self.log_file_size = 1000000  # 1MB
        # Fixed: floor division so the file *count* stays an int
        # (plain '/' yields 10.0 under Python 3; '//' is identical on Python 2)
        self.num_log_files = self.log_size // self.log_file_size

        self.AES_key_id_regexp = u'[A-Za-z0-9]{16}'
        self.AES_file_regexp = r'(.*)\.aes'
        self.AES_file_regexp_comp = re.compile(self.AES_file_regexp)
        self.AES_keyfile_prefix = "aeskey-"

        self.exceptions_email_hourly_limit = 20

        self.enable_input_length_checks = True

        self.mail_timeout = 15  # seconds
        self.mail_attempts_limit = 3  # per mail limit

        self.acme_directory_url = 'https://acme-v02.api.letsencrypt.org/directory'

        self.enable_api_cache = True

        self.eval_paths()
Example #37
0
    def close(self):
        ''' terminate the connection, flushing any newly learned SSH host
        keys to the known_hosts file under an exclusive lock '''

        cache_key = self._cache_key()
        SSH_CONNECTION_CACHE.pop(cache_key, None)
        SFTP_CONNECTION_CACHE.pop(cache_key, None)

        if hasattr(self, 'sftp'):
            if self.sftp is not None:
                self.sftp.close()

        if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

            # add any new SSH host keys -- warning -- this could be slow
            # (This doesn't acquire the connection lock because it needs
            # to exclude only other known_hosts writers, not connections
            # that are starting up.)
            lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
            dirname = os.path.dirname(self.keyfile)
            makedirs_safe(dirname)

            KEY_LOCK = open(lockfile, 'w')
            fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

            try:
                # just in case any were added recently
                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner
                key_dir = os.path.dirname(self.keyfile)
                if os.path.exists(self.keyfile):
                    key_stat = os.stat(self.keyfile)
                    mode = key_stat.st_mode
                    uid = key_stat.st_uid
                    gid = key_stat.st_gid
                else:
                    # defaults for a brand-new file: 33188 == 0o100644
                    # (regular file, rw-r--r--), owned by us
                    mode = 33188
                    uid = os.getuid()
                    gid = os.getgid()

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.
                tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
                os.chmod(tmp_keyfile.name, mode & 0o7777)
                os.chown(tmp_keyfile.name, uid, gid)

                self._save_ssh_host_keys(tmp_keyfile.name)
                tmp_keyfile.close()

                os.rename(tmp_keyfile.name, self.keyfile)
            except Exception:
                # Best effort only. Fixed: was a bare 'except:' which also
                # swallowed SystemExit/KeyboardInterrupt. Unable to save keys,
                # including scenario when key was invalid and caught earlier.
                traceback.print_exc()
            finally:
                # Fixed: always release the lock and close the handle; the
                # lock file descriptor was previously leaked.
                fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
                KEY_LOCK.close()

        self.ssh.close()
Example #38
0
def _test_target_ssh_inherit(standardgroup, ui, use_ssh, src_path,
                             target_path):
    """Exercise create_sibling/publish with inherited sibling configuration.

    Builds a dataset with a nested chain of subdatasets, publishes only the
    top level to a sibling, then verifies that ``publish(..., recursive=True,
    missing='inherit')`` creates and configures the nested siblings, and that
    reconfiguring the whole hierarchy with ``inherit=True`` is idempotent.

    Parameters
    ----------
    standardgroup : str or None
        Preferred-content group to set on the sibling; 'backup' causes file
        content to be transferred, a falsy value configures no group.
    ui : bool
        Whether the post-update hook / web UI should be installed on the
        target sibling.
    use_ssh : bool
        Publish via the 'datalad-test' SSH host instead of a local path.
    src_path, target_path : str
        Locations of the source dataset and of the sibling.
    """
    ds = Dataset(src_path).create()
    if use_ssh:
        target_url = 'datalad-test:%s' % target_path
    else:
        target_url = target_path
    remote = "magical"
    # for the test of setting a group, will just smoke test while using current
    # user's group
    ds.create_sibling(target_url,
                      name=remote,
                      shared='group',
                      group=os.getgid(),
                      ui=ui)  # not doing recursively
    if standardgroup:
        ds.repo.set_preferred_content('wanted', 'standard', remote)
        ds.repo.set_preferred_content('group', standardgroup, remote)
    ds.publish(to=remote)

    # now a month later we created a new subdataset... a few of the nested ones
    # A known hiccup happened when there
    # is also subsub ds added - we might incorrectly traverse and not prepare
    # sub first for subsub to inherit etc
    parent_ds = ds
    subdss = []
    nlevels = 2  # gets slow: 1 - 43 sec, 2 - 49 sec , 3 - 69 sec
    # Loop index itself is unused -- only the nesting depth matters.
    for _ in range(nlevels):
        subds = parent_ds.create('sub')
        create_tree(subds.path, {'sub.dat': 'lots of data'})
        parent_ds.save('sub', recursive=True)
        ok_file_under_git(subds.path, 'sub.dat', annexed=True)
        parent_ds = subds
        subdss.append(subds)

    # Expected sibling locations: target/sub, target/sub/sub, ...
    target_subdss = [
        Dataset(opj(*([target_path] + ['sub'] * (i + 1))))
        for i in range(nlevels)
    ]
    # since we do not have yet/thus have not used an option to record to publish
    # to that sibling by default (e.g. --set-upstream), if we run just ds.publish
    # -- should fail
    assert_result_count(
        ds.publish(on_failure='ignore'),
        1,
        status='impossible',
        message=
        'No target sibling configured for default publication, please specify via --to'
    )
    ds.publish(
        to=remote)  # should be ok, non recursive; BUT it (git or us?) would
    # create an empty sub/ directory
    assert_postupdate_hooks(target_path, installed=ui)
    for target_sub in target_subdss:
        ok_(not target_sub.is_installed())  # still not there
    # Recursive publish without inheritance must error on every subdataset,
    # since none of them has the sibling configured yet.
    res = ds.publish(to=remote, recursive=True, on_failure='ignore')
    assert_result_count(res, 1 + len(subdss))
    assert_status(('error', 'notneeded'), res)
    assert_result_count(res,
                        len(subdss),
                        status='error',
                        message=("Unknown target sibling '%s' for publication",
                                 'magical'))

    # Finally publishing with inheritance
    ds.publish(to=remote, recursive=True, missing='inherit')
    assert_postupdate_hooks(target_path, installed=ui)

    def check_dss():
        """Verify preferred-content config and content presence on all levels."""
        # we added the remote and set all the
        for subds in subdss:
            eq_(subds.repo.get_preferred_content('wanted', remote),
                'standard' if standardgroup else '')
            eq_(subds.repo.get_preferred_content('group', remote),
                standardgroup or '')

        for target_sub in target_subdss:
            ok_(target_sub.is_installed())  # it is there now
            eq_(target_sub.repo.config.get('core.sharedrepository'), '1')
            # and we have transferred the content
            if standardgroup and standardgroup == 'backup':
                # only then content should be copied
                ok_file_has_content(opj(target_sub.path, 'sub.dat'),
                                    'lots of data')
            else:
                # otherwise nothing is copied by default
                assert_false(target_sub.repo.file_has_content('sub.dat'))

    check_dss()
    # and it should be ok to reconfigure the full hierarchy of datasets
    # while "inheriting". No URL must be specified, and we must not blow
    # but just issue a warning for the top level dataset which has no super,
    # so cannot inherit anything - use case is to fixup/establish the full
    # hierarchy on the remote site
    with swallow_logs(logging.WARNING) as cml:
        out = ds.create_sibling(None,
                                name=remote,
                                existing="reconfigure",
                                inherit=True,
                                ui=ui,
                                recursive=True)
        eq_(len(out), 1 + len(subdss))
        assert_in("Cannot determine super dataset", cml.out)

    check_dss()
Example #39
0
    def _setup_mount(self):
        """Build the FUSE Operations object and populate the root directory.

        Chooses the root handler from the parsed arguments in ``self.args``:
        a single collection/project, one of the named modes ('by_id',
        'by_pdh', 'by_tag', 'shared', 'home', 'all'), or an arbitrary set of
        --mount-* names attached under a plain root ``Directory``.

        Side effects: may start a crunchstat logger thread, performs API
        calls to validate the requested collection/project, mutates
        ``self.args`` (mode and mount lists), and calls ``sys.exit`` when a
        named mode is combined with custom --mount-* options.
        """
        self.operations = Operations(
            os.getuid(),
            os.getgid(),
            api_client=self.api,
            encoding=self.args.encoding,
            inode_cache=InodeCache(cap=self.args.directory_cache),
            enable_write=self.args.enable_write)

        # Optional resource-usage logger; daemonized so it cannot keep the
        # process alive at shutdown.
        if self.args.crunchstat_interval:
            statsthread = threading.Thread(target=crunchstat.statlogger,
                                           args=(self.args.crunchstat_interval,
                                                 self.api.keep,
                                                 self.operations))
            statsthread.daemon = True
            statsthread.start()

        usr = self.api.users().current().execute(num_retries=self.args.retries)
        now = time.time()
        dir_class = None
        # Positional args common to every directory class; mode-specific
        # extras are appended below, and slot 0 (the parent inode) may be
        # re-pointed at a new root later.
        dir_args = [
            llfuse.ROOT_INODE, self.operations.inodes, self.api,
            self.args.retries
        ]
        mount_readme = False

        if self.args.collection is not None:
            # Set up the request handler with the collection at the root
            # First check that the collection is readable
            self.api.collections().get(uuid=self.args.collection).execute()
            self.args.mode = 'collection'
            dir_class = CollectionDirectory
            dir_args.append(self.args.collection)
        elif self.args.project is not None:
            self.args.mode = 'project'
            dir_class = ProjectDirectory
            dir_args.append(self.api.groups().get(
                uuid=self.args.project).execute(num_retries=self.args.retries))

        # Custom --mount-* options are mutually exclusive with a named mode.
        if (self.args.mount_by_id or self.args.mount_by_pdh
                or self.args.mount_by_tag or self.args.mount_home
                or self.args.mount_shared or self.args.mount_tmp):
            if self.args.mode is not None:
                sys.exit(
                    "Cannot combine '{}' mode with custom --mount-* options.".
                    format(self.args.mode))
        elif self.args.mode is None:
            # If no --mount-custom or custom mount args, --all is the default
            self.args.mode = 'all'

        if self.args.mode in ['by_id', 'by_pdh']:
            # Set up the request handler with the 'magic directory' at the root
            dir_class = MagicDirectory
            dir_args.append(self.args.mode == 'by_pdh')
        elif self.args.mode == 'by_tag':
            dir_class = TagsDirectory
        elif self.args.mode == 'shared':
            dir_class = SharedDirectory
            dir_args.append(usr)
        elif self.args.mode == 'home':
            dir_class = ProjectDirectory
            dir_args.append(usr)
            dir_args.append(True)
        elif self.args.mode == 'all':
            # 'all' is expressed as a set of custom mounts under a plain
            # root directory, handled by the generic path below.
            self.args.mount_by_id = ['by_id']
            self.args.mount_by_tag = ['by_tag']
            self.args.mount_home = ['home']
            self.args.mount_shared = ['shared']
            mount_readme = True

        if dir_class is not None:
            # Single-handler mode: the chosen directory class IS the root.
            ent = dir_class(*dir_args)
            self.operations.inodes.add_entry(ent)
            self.listen_for_events = ent.want_event_subscribe()
            return

        # Generic path: plain root with one child entry per --mount-* name.
        e = self.operations.inodes.add_entry(
            Directory(llfuse.ROOT_INODE, self.operations.inodes))
        # Re-point the shared args at the new root's inode before reuse.
        dir_args[0] = e.inode

        for name in self.args.mount_by_id:
            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=False))
        for name in self.args.mount_by_pdh:
            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=True))
        for name in self.args.mount_by_tag:
            self._add_mount(e, name, TagsDirectory(*dir_args))
        for name in self.args.mount_home:
            self._add_mount(
                e, name,
                ProjectDirectory(*dir_args, project_object=usr, poll=True))
        for name in self.args.mount_shared:
            self._add_mount(e, name,
                            SharedDirectory(*dir_args, exclude=usr, poll=True))
        for name in self.args.mount_tmp:
            self._add_mount(e, name, TmpCollectionDirectory(*dir_args))

        if mount_readme:
            text = self._readme_text(arvados.config.get('ARVADOS_API_HOST'),
                                     usr['email'])
            self._add_mount(e, 'README', StringFile(e.inode, text, now))