Example #1
 def __init__(self, sgfs, project_root):
     
     self.sgfs = sgfs
     self.project_root = os.path.abspath(project_root)
     
     # We are in the middle of transitioning where the SQLite file
     # is located, and for now we prioritize the old location.
     for name in ('.sgfs-cache.sqlite', '.sgfs/cache.sqlite'):
         db_path = os.path.join(project_root, name)
         if os.path.exists(db_path):
             break
     else:
         # If it doesn't exist then touch it with read/write permissions for all.
         db_dir = os.path.dirname(db_path)
         umask = os.umask(0)
         try:
             try:
                 os.makedirs(db_dir)
             except OSError as e:
                 if e.errno != errno.EEXIST:
                     raise
         os.umask(0o111)
             call(['touch', db_path])
         finally:
             os.umask(umask)
     
     self.conn = sqlite3.connect(db_path)
     self.conn.text_factory = str
     
     with self.conn:
         self.conn.execute('CREATE TABLE IF NOT EXISTS entity_paths (entity_type TEXT, entity_id INTEGER, path TEXT)')
         self.conn.execute('CREATE UNIQUE INDEX IF NOT EXISTS entity_paths_entity ON entity_paths(entity_type, entity_id)')
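
The umask juggling above exists only so that the shelled-out `touch` creates a world-readable/writable cache file. A minimal pure-Python sketch of the same effect (an assumption, not the project's code):

import os

def touch_world_rw(path):
    # os.open honours the process umask, so clear it around the call;
    # the requested mode 0o666 then becomes the actual file mode.
    old_umask = os.umask(0)
    try:
        os.close(os.open(path, os.O_CREAT | os.O_RDWR, 0o666))
    finally:
        os.umask(old_umask)
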
Example #2
def daemonize(errfile):
    """
    Detach process and become a daemon.
    """
    pid = os.fork()
    if pid:
        os._exit(0)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.umask(0)

    pid = os.fork()
    if pid:
        os._exit(0)

    os.chdir("/")
    for fd in range(0,20):
        try:
            os.close(fd)
        except OSError:
            pass

    sys.stdin = open("/dev/null","r")
    sys.stdout = open("/dev/null","w")
    sys.stderr = ErrorLog( errfile )
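
This, like most of the daemonize() variants below, is the classic Unix double-fork recipe: fork, setsid(), fork again, reset the umask, and detach the standard streams. A hedged modern alternative, assuming the third-party python-daemon package (the PEP 3143 reference implementation) is acceptable:

import daemon  # third-party: pip install python-daemon

def run():
    pass  # hypothetical entry point

with daemon.DaemonContext(umask=0o022):
    run()
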
Example #3
    def drop_privileges(self, uid_name=None, gid_name=None):
        """ Drop privileges
        
        Found in https://github.com/zedshaw/python-lust/blob/master/lust/unix.py
        """
        if os.getuid() != 0:
            self.logger.warning("Must be root to drop privileges!")
            return
    
        # Get the uid/gid from the name. If no group given, then derive group from uid_name
        if uid_name is None:
            uid_name = "nobody"  # builtin default is nobody
        running_uid = pwd.getpwnam(uid_name).pw_uid
        if gid_name is None:
            running_gid = pwd.getpwnam(uid_name).pw_gid
        else:
            running_gid = grp.getgrnam(gid_name).gr_gid

        self.logger.debug("Running as %r.%r" % (running_uid, running_gid))
    
        # Remove group privileges
        os.setgroups([])
    
        # Try setting the new uid/gid
        os.setgid(running_gid)
        os.setuid(running_uid)
    
        # Ensure a very conservative umask
        os.umask(0o077)
Example #4
def addAlbumArt(artwork, albumpath, release):
    logger.info('Adding album art to folder')
    
    try:
        year = release['ReleaseDate'][:4]
    except TypeError:
        year = ''
    
    values = {  '$Artist':      release['ArtistName'],
                '$Album':       release['AlbumTitle'],
                '$Year':        year,
                '$artist':      release['ArtistName'].lower(),
                '$album':       release['AlbumTitle'].lower(),
                '$year':        year
                }
    
    album_art_name = helpers.replace_all(headphones.ALBUM_ART_FORMAT.strip(), values).replace('/','_') + ".jpg"

    album_art_name = album_art_name.replace('?','_').replace(':', '_').encode(headphones.SYS_ENCODING, 'replace')

    if headphones.FILE_UNDERSCORES:
        album_art_name = album_art_name.replace(' ', '_')

    if album_art_name.startswith('.'):
        album_art_name = '_' + album_art_name[1:]

    prev = os.umask(headphones.UMASK)
    file = open(os.path.join(albumpath, album_art_name), 'wb')
    file.write(artwork)
    file.close()
    os.umask(prev)
Example #5
def downloadChunks(url):
    """Helper to download large files
        the only arg is a url
       this file will go to a temp directory
       the file will also be downloaded
       in chunks and print out how much remains
    """

    baseFile = path.basename(url)

    # relax the umask so the downloaded file is group-writable
    umask(0o002)

    try:
        temp_path='/tmp'
        file = path.join(temp_path,baseFile)

        req = urllib2.urlopen(url)
        total_size = int(req.info().getheader('Content-Length').strip())
        downloaded = 0
        CHUNK = 256 * 10240
        with open(file, 'wb') as fp:
            while True:
                chunk = req.read(CHUNK)
                downloaded += len(chunk)
                print math.floor((downloaded / float(total_size)) * 100)
                if not chunk: break
                fp.write(chunk)
    except urllib2.HTTPError, e:
        print "HTTP Error:",e.code , url
        return False
Example #6
def daemonize():
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print e
        sys.exit(1)
    os.chdir('/')
    os.umask(0)
    os.setsid()
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print e
        sys.exit(1)
    for f in sys.stdout, sys.stderr:
        f.flush()
    si = file('/dev/null', 'r')
    so = file('/dev/null', 'a+')
    se = file('/dev/null', 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
Example #7
 def _daemonize(self):
     if not self.config.NODETACH:
         # fork so the parent can exit
         if os.fork():
             return -1
         # disconnect from the tty and create a new session
         os.setsid()
         # fork again so the parent (the session group leader) can exit.
         # as a non-session group leader, we can never regain a controlling
         # terminal.
         if os.fork():
             return -1
         # move to the root to avoid mount point problems
         os.chdir("/")
         # set paranoid umask
         os.umask(0o77)
         # write pid in a file
         f = open(self._pid_file, "w")
         f.write(str(os.getpid()))
         f.close()
         # close standard descriptors
         sys.stdin.close()
         sys.stdout.close()
         sys.stderr.close()
         # put signal handler
         signal.signal(signal.SIGTERM, self.signal_handler)
         signal.signal(signal.SIGHUP, self.signal_handler)
Example #8
def getSerial(filename='/tmp/twisted-names.serial'):
    """
    Return a monotonically increasing (across program runs) integer.

    State is stored in the given file.  If it does not exist, it is
    created with rw-/---/--- permissions.

    @param filename: Path to a file that is used to store the state across
        program runs.
    @type filename: L{str}

    @return: a monotonically increasing number
    @rtype: L{str}
    """
    serial = time.strftime('%Y%m%d')

    o = os.umask(0o177)
    try:
        if not os.path.exists(filename):
            with open(filename, 'w') as f:
                f.write(serial + ' 0')
    finally:
        os.umask(o)

    with open(filename, 'r') as serialFile:
        lastSerial, zoneID = serialFile.readline().split()

    zoneID = (lastSerial == serial) and (int(zoneID) + 1) or 0

    with open(filename, 'w') as serialFile:
        serialFile.write('%s %d' % (serial, zoneID))

    serial = serial + ('%02d' % (zoneID,))
    return serial
Example #9
def detach():
    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        print >>sys.stderr, "fork failed: %s" % error.message
        sys.exit(1)

    os.setsid()
    os.umask(0)

    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        print >>sys.stderr, "fork failed: %s" % error.message
        sys.exit(1)

    sys.stdout.flush()
    sys.stderr.flush()

    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")

    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
Example #10
    def daemonize(self):
        """
        Fork off as a daemon
        """
        # pylint: disable=protected-access
        # An object is accessed for a non-existent member.
        # Access to a protected member of a client class
        # Make a non-session-leader child process
        try:
            pid = os.fork()  # @UndefinedVariable - only available in UNIX
            if pid != 0:
                os._exit(0)
        except OSError as error:
            sys.stderr.write('fork #1 failed: {error_num}: {error_message}\n'.format
                             (error_num=error.errno, error_message=error.strerror))
            sys.exit(1)

        os.setsid()  # @UndefinedVariable - only available in UNIX

        # https://github.com/SickRage/SickRage/issues/2969
        # http://www.microhowto.info/howto/cause_a_process_to_become_a_daemon_in_c.html#idp23920
        # https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch06s08.html
        # Previous code simply set the umask to whatever it was because it was ANDing instead of OR-ing
        # Daemons traditionally run with umask 0 anyways and this should not have repercussions
        os.umask(0)

        # Make the child a session-leader by detaching from the terminal
        try:
            pid = os.fork()  # @UndefinedVariable - only available in UNIX
            if pid != 0:
                os._exit(0)
        except OSError as error:
            sys.stderr.write('fork #2 failed: Error {error_num}: {error_message}\n'.format
                             (error_num=error.errno, error_message=error.strerror))
            sys.exit(1)

        # Write pid
        if self.create_pid:
            pid = os.getpid()
            logger.log('Writing PID: {pid} to {filename}'.format(pid=pid, filename=self.pid_file))

            try:
                with io.open(self.pid_file, 'w') as f_pid:
                    f_pid.write('{0}\n'.format(pid))
            except EnvironmentError as error:
                logger.log_error_and_exit('Unable to write PID file: {filename} Error {error_num}: {error_message}'.format
                                          (filename=self.pid_file, error_num=error.errno, error_message=error.strerror))

        # Redirect all output
        sys.stdout.flush()
        sys.stderr.flush()

        devnull = getattr(os, 'devnull', '/dev/null')
        stdin = file(devnull)
        stdout = file(devnull, 'a+')
        stderr = file(devnull, 'a+')

        os.dup2(stdin.fileno(), getattr(sys.stdin, 'device', sys.stdin).fileno())
        os.dup2(stdout.fileno(), getattr(sys.stdout, 'device', sys.stdout).fileno())
        os.dup2(stderr.fileno(), getattr(sys.stderr, 'device', sys.stderr).fileno())
Example #11
    def download(self, pluginPath='',callback = None):
        core = core_install.CoreInstall()

        if pluginPath:#     and os.path.exists(pluginPath):
            src = pluginPath
            checksum = ""       # TBD: Local copy may have different checksum. So ignoring checksum
        else:
            sts, url, checksum = self.__getPluginInformation(callback)
            src = url
            if sts != ERROR_SUCCESS:
                return sts, "", queryString(ERROR_CHECKSUM_ERROR, 0, src)

        log.debug("Downloading %s plug-in file from '%s' to '%s'..." % (self.__required_version, src, self.__plugin_path))
        plugin_file = os.path.join(self.__plugin_path, self.__plugin_name)
        try:
            os.umask(0)
            if not os.path.exists(self.__plugin_path):
                os.makedirs(self.__plugin_path, 0o755)
            if os.path.exists(plugin_file):
                os.remove(plugin_file)
            if os.path.exists(plugin_file+'.asc'):
                os.remove(plugin_file+'.asc')

        except (OSError, IOError), e:
            log.error("Failed in OS operations:%s "%e.strerror)
            return ERROR_DIRECTORY_NOT_FOUND, "", self.__plugin_path + queryString(102)
Example #12
def standard_logging_setup(filename=None, verbose=False, debug=False,
                           filemode='w', console_format=None):
    if console_format is None:
        console_format = LOGGING_FORMAT_STANDARD_CONSOLE

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    # File output is always logged at debug level
    if filename is not None:
        umask = os.umask(0o177)
        try:
            file_handler = logging.FileHandler(filename, mode=filemode)
        finally:
            os.umask(umask)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(Formatter(LOGGING_FORMAT_STANDARD_FILE))
        root_logger.addHandler(file_handler)

    level = logging.ERROR
    if verbose:
        level = logging.INFO
    if debug:
        level = logging.DEBUG

    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(Formatter(console_format))
    root_logger.addHandler(console_handler)
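
A hypothetical usage of the helper above (the LOGGING_FORMAT_* constants are assumed to be defined elsewhere in the module). The umask(0o177) swap makes the log file readable and writable by its owner only:

standard_logging_setup(filename='app.log', verbose=True)
logging.getLogger(__name__).info('logging initialised')
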
Example #13
 def _daemonize():
     pid = os.fork() 
     if pid > 0:
         # exit first parent
         sys.exit(0) 
 
     # decouple from parent environment
     os.chdir(WORKDIR) 
     os.setsid() 
     os.umask(0) 
 
     # do second fork     
     pid = os.fork() 
     if pid > 0:
         # exit from second parent
         sys.exit(0) 
 
     # redirect standard file descriptors
     sys.stdout.flush()
     sys.stderr.flush()
     si = open(LOG_FILE, 'r')
     so = open(LOG_FILE, 'a+')
     se = open(LOG_FILE, 'a+', 0)
     os.dup2(si.fileno(), sys.stdin.fileno())
     os.dup2(so.fileno(), sys.stdout.fileno())
     os.dup2(se.fileno(), sys.stderr.fileno())
 
     # write pidfile
     pid = str(os.getpid())
     f = open(PID_FILE,'w')
     f.write("%s\n" % pid)
     f.close()
     atexit.register(lambda: os.remove(PID_FILE))
Example #14
def daemon(working_directory='/', pidfile=None, stdin=None, stdout=None, stderr=None):
    stdin = stdin or '/dev/null'
    stdout = stdout or '/dev/null'
    stderr = stderr or '/dev/null'

    pid = os.fork()
    if pid != 0:
        sys.exit(0)
    
    os.chdir(working_directory)
    os.setsid() # Create new session and sets process group.
    os.umask(2)
    
    pid = os.fork() # Will have INIT (pid 1) as parent process...
    if pid != 0: # if pid is not child...
        sys.exit(0)
        
    sys.stdout.flush()
    sys.stderr.flush()
    si = file(stdin, "r")
    so = file(stdout, "a+")
    se = file(stderr, "a+", 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    if pidfile:
        writeto(pidfile, str(os.getpid()))
Example #15
def tmpfile(stream, mode=None):
    """Context manager that writes a :class:`Stream` object to a named
    temporary file and yields its filename. Cleanup deletes the temporary
    file from disk.

    Args:
        stream (Stream): Stream object to write to disk as temporary file.
        mode (int, optional): File mode to set on temporary file.

    Returns:
        str: Temporary file name
    """
    tmp = NamedTemporaryFile(delete=False)

    if mode is not None:
        oldmask = os.umask(0)

        try:
            os.chmod(tmp.name, mode)
        finally:
            os.umask(oldmask)

    for data in stream:
        tmp.write(to_bytes(data))

    tmp.close()

    yield tmp.name

    os.remove(tmp.name)
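
A hypothetical usage, assuming tmpfile is wrapped with contextlib.contextmanager (the decorator sits outside this excerpt, which is why the body can yield):

with tmpfile([b'hello ', b'world'], mode=0o600) as name:
    print(open(name, 'rb').read())  # any iterable of chunks works as the stream
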
Example #16
 def getUMask(self):
   if (self.current_umask == -1):
     self.current_umask = os.umask(self.current_umask)
     os.umask(self.current_umask)
     return self.current_umask
   else:
     return self.current_umask
Example #17
 def method_umask(self, space, mask=-1):
     if mask >= 0:
         return space.newint(os.umask(mask))
     else:
         current_umask = os.umask(0)
         os.umask(current_umask)
         return space.newint(current_umask)
Example #18
def daemonize():
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)
        
        os.umask(0)
        maxfd = get_maxfd()

        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:	# ERROR, fd wasn't open to begin with (ignored)
                pass
        
        os.open(REDIRECT_TO, os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)
Example #19
File: agent.py Project: allgi/mmc
    def run(self):
        # If umask = 0077, created files will be rw for effective user only
        # If umask = 0007, they will be rw for effective user and group only
        os.umask(self.config.umask)
        os.setegid(self.config.egid)
        os.seteuid(self.config.euid)

        # Daemonize early
        if self.daemon:
            self.lock.acquire()
            self.daemonize()

        # Do all kind of initialization
        try:
            ret = self.initialize()
        finally:
            # Tell the father how to return, and let him return (release)
            if self.daemon:
                self.state = ret
                self.lock.release()

        if ret:
            return ret

        reactor.run()
Example #20
 def __init__(self, threads=1000, initialize_glance_store=False):
     os.umask(0o27)  # ensure files are created with the correct privileges
     self._logger = logging.getLogger("eventlet.wsgi.server")
     self.threads = threads
     self.children = set()
     self.stale_children = set()
     self.running = True
     # NOTE(abhishek): Allows us to only re-initialize glance_store when
     # the API's configuration reloads.
     self.initialize_glance_store = initialize_glance_store
     self.pgid = os.getpid()
     try:
         # NOTE(flaper87): Make sure this process
         # runs in its own process group.
         os.setpgid(self.pgid, self.pgid)
     except OSError:
         # NOTE(flaper87): When running glance-control,
         # (glance's functional tests, for example)
         # setpgid fails with EPERM as glance-control
         # creates a fresh session, of which the newly
         # launched service becomes the leader (session
         # leaders may not change process groups)
         #
         # Running glance-(api|registry) is safe and
         # shouldn't raise any error here.
         self.pgid = 0
Example #21
    def test_default_mode(self):
        # Turn off umask for initial testing of modes
        os.umask(0o000)

        # Create a db, check default mode is 0666
        sqlite3dbm.dbm.SqliteMap(self.path, flag='c')
        testify.assert_equal(self.get_perm_mask(self.path), 0o666)
Example #22
def umask():
    '''
    Get umask without changing it.
    '''
    old = os.umask(0)
    os.umask(old)
    return old
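
One caveat: the set-and-restore idiom briefly widens the process-wide umask, so it can race with file creation in other threads. On Linux 4.7+ the mask can be read without modifying it; a sketch (an assumption, not part of this example's project):

import os

def read_umask():
    # Parse the Umask field from /proc/self/status (Linux >= 4.7),
    # falling back to the racy set-and-restore idiom elsewhere.
    try:
        with open('/proc/self/status') as status:
            for line in status:
                if line.startswith('Umask:'):
                    return int(line.split()[1], 8)
    except (IOError, OSError):
        pass
    old = os.umask(0)
    os.umask(old)
    return old
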
Example #23
def initialize_1(config_path=None):
    """First initialization step.

    * Zope component architecture
    * The configuration system
    * Run-time directories

    :param config_path: The path to the configuration file.
    :type config_path: string
    """
    zcml = resource_string('mailman.config', 'configure.zcml')
    xmlconfig.string(zcml)
    # By default, set the umask so that only owner and group can read and
    # write our files.  Specifically we must have g+rw and we probably want
    # o-rwx although I think in most cases it doesn't hurt if other can read
    # or write the files.
    os.umask(0o007)
    # Initialize configuration event subscribers.  This must be done before
    # setting up the configuration system.
    from mailman.app.events import initialize as initialize_events
    initialize_events()
    # config_path will be set if the command line argument -C is given.  That
    # case overrides all others.  When not given on the command line, the
    # configuration file is searched for in the file system.
    if config_path is None:
        config_path = search_for_configuration_file()
    elif config_path is INHIBIT_CONFIG_FILE:
        # For the test suite, force this back to not using a config file.
        config_path = None
    mailman.config.config.load(config_path)
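
To make the effect of umask(0o007) concrete (an illustration, not Mailman code): a file created with the default mode 0o666 comes out as 0o660 and a directory created with mode 0o777 comes out as 0o770, so group read/write survives while all "other" access is stripped:

import os

os.umask(0o007)
fd = os.open('example.db', os.O_CREAT | os.O_WRONLY, 0o666)  # -> mode 0o660
os.close(fd)
os.mkdir('example.d', 0o777)                                 # -> mode 0o770
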
Example #24
	def start(self, sock, force = False):
		logSys.info("Starting Fail2ban v" + version.version)
		
		# Install signal handlers
		signal.signal(signal.SIGTERM, self.__sigTERMhandler)
		signal.signal(signal.SIGINT, self.__sigTERMhandler)
		
		# First set the mask to only allow access to owner
		os.umask(0o077)
		if self.__daemon:
			logSys.info("Starting in daemon mode")
			ret = self.__createDaemon()
			if ret:
				logSys.info("Daemon started")
			else:
				logSys.error("Could not create daemon")
				raise ServerInitializationError("Could not create daemon")
		
		# Creates a PID file.
		try:
			logSys.debug("Creating PID file %s" % Server.PID_FILE)
			pidFile = open(Server.PID_FILE, 'w')
			pidFile.write("%s\n" % os.getpid())
			pidFile.close()
		except IOError, e:
			logSys.error("Unable to create PID file: %s" % e)
Example #25
    def run(self):
        """
        Start program
        """
        options = Options()

        os.umask(int('022', 8))
        archive = options.get_archive()
        try:
            with tarfile.open(archive+'.part', 'w') as ofile:
                self._addfile(ofile, options.get_files())
        except OSError:
            raise SystemExit(
                sys.argv[0] + ': Cannot create "' +
                archive + '.part" archive file.'
            )
        try:
            shutil.move(archive+'.part', archive)
        except OSError:
            raise SystemExit(
                '{0:s}: Cannot create "{1:s}" archive file.'.format(
                    sys.argv[0],
                    archive
                )
            )
Example #26
def create_daemon(_pidfile):
    global pidfile
    pidfile = _pidfile
    if os.path.isfile(pidfile):
        print('pid file[' + pidfile + '] still exist.  please check your system.')
        os._exit(1)
    if not os.path.isdir(os.path.dirname(pidfile)):
        os.mkdir(os.path.dirname(pidfile))
    pid = os.fork()
    if pid == 0:
        os.setsid()
        with open(pidfile, 'w') as f:
            f.write(str(os.getpid()))
        os.chdir('/')
        os.umask(0)
    else:  # parent goes bye bye
        os._exit(0)

    si = os.open('/dev/null', os.O_RDONLY)
    so = os.open('/dev/null', os.O_RDWR)
    se = os.open('/dev/null', os.O_RDWR)
    os.dup2(si, sys.stdin.fileno())
    os.dup2(so, sys.stdout.fileno())
    os.dup2(se, sys.stderr.fileno())
    os.close(si)
    os.close(so)
    os.close(se)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
Example #27
def _daemonize():

    # Fork once.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return

    # Set some options to detach from the terminal.
    os.chdir('/')
    os.setsid()
    os.umask(0)

    # Fork again.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return

    # Find the OS /dev/null equivalent.
    nullfile = getattr(os, 'devnull', '/dev/null')

    # Redirect all standard I/O to /dev/null.
    sys.stdout.flush()
    sys.stderr.flush()
    si = file(nullfile, 'r')
    so = file(nullfile, 'a+')
    se = file(nullfile, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
Example #28
def need_deployment():
    """
    Salt thin needs to be deployed - prep the target directory and emit the
    delimiter and exit code that signals a required deployment.
    """
    if os.path.exists(OPTIONS.saltdir):
        shutil.rmtree(OPTIONS.saltdir)
    old_umask = os.umask(0o077)
    os.makedirs(OPTIONS.saltdir)
    os.umask(old_umask)
    # Verify perms on saltdir
    euid = os.geteuid()
    dstat = os.stat(OPTIONS.saltdir)
    if dstat.st_uid != euid:
        # Attack detected, try again
        need_deployment()
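    # 16832 == 0o40700, i.e. a directory (S_IFDIR) with mode 0700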
    if dstat.st_mode != 16832:
        # Attack detected
        need_deployment()
    # If SUDOing then also give the super user group write permissions
    sudo_gid = os.environ.get('SUDO_GID')
    if sudo_gid:
        os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
        stt = os.stat(OPTIONS.saltdir)
        os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)

    # Delimiter emitted on stdout *only* to indicate shim message to master.
    sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
    sys.exit(EX_THIN_DEPLOY)
Example #29
    def open(self, name, mode='rb'):
        """
        Let's create the needed directory structure before opening the file
        """

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(name)
        if not os.path.exists(directory):
            try:
                if self.directory_permissions_mode is not None:
                    # os.makedirs applies the global umask, so we reset it,
                    # for consistency with file_permissions_mode behavior.
                    old_umask = os.umask(0)
                    try:
                        os.makedirs(directory, self.directory_permissions_mode)
                    finally:
                        os.umask(old_umask)
                else:
                    os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        return super().open(name, mode=mode)
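
On Python 3.2+ the manual EEXIST handling above can be dropped; a sketch of the same umask-neutral directory creation (an illustration, not Django's actual implementation):

import os

def makedirs_with_mode(directory, mode):
    # os.makedirs applies the global umask, so clear it for the call;
    # exist_ok=True replaces the explicit EEXIST check.
    old_umask = os.umask(0)
    try:
        os.makedirs(directory, mode, exist_ok=True)
    finally:
        os.umask(old_umask)
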
Example #30
 def _EnsureDatabaseExists(self, target_path):
   # Check if file already exists.
   if os.path.exists(target_path):
     self._WaitUntilReadable(target_path)
     return
   # Copy database file to a file that has no read permissions.
   umask_original = os.umask(0)
   write_permissions = stat.S_IWUSR | stat.S_IWGRP
   read_permissions = stat.S_IRUSR | stat.S_IRGRP
   try:
     fd = os.open(target_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY,
                  write_permissions)
     os.close(fd)
     with open(target_path, "wb") as target_file:
       target_file.write(self.template)
     os.chmod(target_path, write_permissions | read_permissions)
   except OSError:
     # Failed to create file
     if os.path.exists(target_path):
       # Failed because file was created in the meantime (race condition)
       self._WaitUntilReadable(target_path)
     else:
       logging.error("Could not create database file. Make sure the "
                     "data_server has write access to the target_path "
                     "directory to create the file '%s'", target_path)
   finally:
     os.umask(umask_original)
Example #31
 def bind(self, sock):
     old_umask = os.umask(self.conf.umask)
     sock.bind(self.cfg_addr)
     util.chown(self.cfg_addr, self.conf.uid, self.conf.gid)
     os.umask(old_umask)
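
The swap matters here because a Unix domain socket's file permissions are fixed when bind() creates it, so a later chmod would leave a window. A standalone sketch of the same pattern with assumed values (not gunicorn's code):

import os
import socket

old = os.umask(0o117)  # 0o777 & ~0o117 == 0o660: owner/group only
try:
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind('/tmp/example.sock')
finally:
    os.umask(old)
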
Example #32
def init(name,
         cpu,
         mem,
         image=None,
         nic='default',
         hypervisor=VIRT_DEFAULT_HYPER,
         start=True,  # pylint: disable=redefined-outer-name
         disk='default',
         saltenv='base',
         **kwargs):
    '''
    Initialize a new vm

    CLI Example:

    .. code-block:: bash

        salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
        salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
    '''
    hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)

    nicp = _nic_profile(nic, hypervisor, **kwargs)

    diskp = None
    seedable = False
    if image:  # with disk template image
        # if image was used, assume only one disk, i.e. the
        # 'default' disk profile
        # TODO: make it possible to use disk profiles and use the
        # template image as the system disk
        diskp = _disk_profile('default', hypervisor, **kwargs)

        # When using a disk profile extract the sole dict key of the first
        # array element as the filename for disk
        disk_name = next(diskp[0].iterkeys())
        disk_type = diskp[0][disk_name]['format']
        disk_file_name = '{0}.{1}'.format(disk_name, disk_type)

        if hypervisor in ['esxi', 'vmware']:
            # TODO: we should be copying the image file onto the ESX host
            raise SaltInvocationError('virt.init does not support image '
                                      'template template in conjunction '
                                      'with esxi hypervisor')
        elif hypervisor in ['qemu', 'kvm']:
            img_dir = __salt__['config.option']('virt.images')
            img_dest = os.path.join(
                img_dir,
                name,
                disk_file_name
            )
            img_dir = os.path.dirname(img_dest)
            sfn = __salt__['cp.cache_file'](image, saltenv)
            if not os.path.isdir(img_dir):
                os.makedirs(img_dir)
            try:
                salt.utils.files.copyfile(sfn, img_dest)
                mask = os.umask(0)
                os.umask(mask)
                # Apply umask and remove exec bit
                mode = (0o0777 ^ mask) & 0o0666
                os.chmod(img_dest, mode)

            except (IOError, OSError):
                return False
            seedable = True
        else:
            log.error('unsupported hypervisor when handling disk image')

    else:
        # no disk template image specified, create disks based on disk profile
        diskp = _disk_profile(disk, hypervisor, **kwargs)
        if hypervisor in ['qemu', 'kvm']:
            # TODO: we should be creating disks in the local filesystem with
            # qemu-img
            raise SaltInvocationError('virt.init does not support disk '
                                      'profiles in conjunction with '
                                      'qemu/kvm at this time, use image '
                                      'template instead')
        else:
            # assume libvirt manages disks for us
            for disk in diskp:
                for disk_name, args in disk.items():
                    xml = _gen_vol_xml(name,
                                       disk_name,
                                       args['size'],
                                       hypervisor)
                    define_vol_xml_str(xml)

    xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
    define_xml_str(xml)

    if kwargs.get('seed') and seedable:
        install = kwargs.get('install', True)
        seed_cmd = kwargs.get('seed_cmd', 'seed.apply')

        __salt__[seed_cmd](img_dest,
                           id_=name,
                           config=kwargs.get('config'),
                           install=install)
    if start:
        create(name)

    return True
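
The three-line umask trick buried in the qemu/kvm branch above (read the mask, restore it, combine) is worth isolating: it computes the mode a newly created file would get under the current umask, with exec bits stripped.

import os

def default_file_mode():
    # e.g. with umask 0o022: (0o777 ^ 0o022) & 0o666 == 0o644
    mask = os.umask(0)
    os.umask(mask)  # restore immediately; os.umask() returns the previous mask
    return (0o777 ^ mask) & 0o666
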
Example #33
import os
import sys


def detach():
    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError, error:
        print >> sys.stderr, "fork failed: %s" % error.message
        sys.exit(1)

    os.setsid()
    os.umask(0)

    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError, error:
        print >> sys.stderr, "fork failed: %s" % error.message
        sys.exit(1)

    sys.stdout.flush()
    sys.stderr.flush()

    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")
Example #34
    def _create_via_common_rec(self, path, create_symlinks=True):
        if not self.mode:
            raise ApplyError('no metadata - cannot create path ' + path)

        # If the path already exists and is a dir, try rmdir.
        # If the path already exists and is anything else, try unlink.
        st = None
        try:
            st = xstat.lstat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        if st:
            if stat.S_ISDIR(st.st_mode):
                try:
                    os.rmdir(path)
                except OSError as e:
                    if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
                        msg = 'refusing to overwrite non-empty dir ' + path
                        raise Exception(msg)
                    raise
            else:
                os.unlink(path)

        if stat.S_ISREG(self.mode):
            assert(self._recognized_file_type())
            fd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL, 0o600)
            os.close(fd)
        elif stat.S_ISDIR(self.mode):
            assert(self._recognized_file_type())
            os.mkdir(path, 0o700)
        elif stat.S_ISCHR(self.mode):
            assert(self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFCHR, self.rdev)
        elif stat.S_ISBLK(self.mode):
            assert(self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFBLK, self.rdev)
        elif stat.S_ISFIFO(self.mode):
            assert(self._recognized_file_type())
            os.mknod(path, 0o600 | stat.S_IFIFO)
        elif stat.S_ISSOCK(self.mode):
            try:
                os.mknod(path, 0o600 | stat.S_IFSOCK)
            except OSError as e:
                if e.errno in (errno.EINVAL, errno.EPERM):
                    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    s.bind(path)
                else:
                    raise
        elif stat.S_ISLNK(self.mode):
            assert(self._recognized_file_type())
            if self.symlink_target and create_symlinks:
                # on MacOS, symlink() permissions depend on umask, and there's
                # no way to chown a symlink after creating it, so we have to
                # be careful here!
                oldumask = os.umask((self.mode & 0o777) ^ 0o777)
                try:
                    os.symlink(self.symlink_target, path)
                finally:
                    os.umask(oldumask)
        # FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
        else:
            assert(not self._recognized_file_type())
            add_error('not creating "%s" with unrecognized mode "0x%x"\n'
                      % (path, self.mode))
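
The symlink branch relies on a neat identity: setting the umask to the bitwise complement of the desired permissions makes the new link come out with exactly those permissions. As a standalone sketch (not bup's code):

import os

def symlink_with_mode(target, path, mode):
    # On platforms where symlink permissions follow the umask (e.g. macOS),
    # umask = ~mode (within 0o777) yields the requested mode on the link.
    old = os.umask((mode & 0o777) ^ 0o777)
    try:
        os.symlink(target, path)
    finally:
        os.umask(old)
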
Example #35
def backgroundbackup(repo, command=None, dest=None):
    """start background backup"""
    ui = repo.ui
    if command is not None:
        background_cmd = command
    elif workspace.currentworkspace(repo):
        background_cmd = ["hg", "cloud", "sync"]
    else:
        background_cmd = ["hg", "cloud", "backup"]
    infinitepush_bgssh = ui.config("infinitepush", "bgssh")
    if infinitepush_bgssh:
        background_cmd += ["--config", "ui.ssh=%s" % infinitepush_bgssh]

    # developer config: infinitepushbackup.bgdebuglocks
    if ui.configbool("infinitepushbackup", "bgdebuglocks"):
        background_cmd += ["--config", "devel.debug-lockers=true"]

    # developer config: infinitepushbackup.bgdebug
    if ui.configbool("infinitepushbackup", "bgdebug", False):
        background_cmd.append("--debug")

    if dest:
        background_cmd += ["--dest", dest]

    logfile = None
    logdir = ui.config("infinitepushbackup", "logdir")
    if logdir:
        # make newly created files and dirs non-writable by group and others
        oldumask = os.umask(0o022)
        try:
            try:
                # the user name from the machine
                username = util.getuser()
            except Exception:
                username = "******"

            if not _checkcommonlogdir(logdir):
                raise WrongPermissionsException(logdir)

            userlogdir = os.path.join(logdir, username)
            util.makedirs(userlogdir)

            if not _checkuserlogdir(userlogdir):
                raise WrongPermissionsException(userlogdir)

            reponame = os.path.basename(repo.sharedroot)
            _removeoldlogfiles(userlogdir, reponame)
            logfile = _getlogfilename(logdir, username, reponame)
        except (OSError, IOError) as e:
            ui.debug("background backup log is disabled: %s\n" % e)
        except WrongPermissionsException as e:
            ui.debug(
                ("%s directory has incorrect permission, " +
                 "background backup logging will be disabled\n") % e.logdir)
        finally:
            os.umask(oldumask)

    if not logfile:
        logfile = os.devnull

    with open(logfile, "a") as f:
        timestamp = util.datestr(util.makedate(), "%Y-%m-%d %H:%M:%S %z")
        fullcmd = " ".join(util.shellquote(arg) for arg in background_cmd)
        f.write("\n%s starting: %s\n" % (timestamp, fullcmd))
        # Windows doesn't support background process redirection of std*
        if pycompat.iswindows:
            f = None
        runbgcommand(background_cmd, None, shell=False, stdout=f, stderr=f)
Example #36
    def run_interactive(self, log, args):

        # get list of all PCs for this project
        project_id = self.tk.pipeline_configuration.get_project_id()
        current_pc_name = self.tk.pipeline_configuration.get_name()
        current_pc_id = self.tk.pipeline_configuration.get_shotgun_id()
        pipeline_configs = self.tk.shotgun.find(
            constants.PIPELINE_CONFIGURATION_ENTITY,
            [["project", "is", {
                "type": "Project",
                "id": project_id
            }]], ["code", "linux_path", "windows_path", "mac_path"])

        if len(args) == 1 and args[0] == "--symlink":
            use_symlink = True
        else:
            use_symlink = False

        if len(pipeline_configs) == 1:
            raise TankError(
                "Only one pipeline configuration for this project! Need at least two "
                "configurations in order to push. Please start by cloning a pipeline "
                "configuration inside of Shotgun.")

        log.info(
            "This command will push the configuration in the current pipeline configuration "
            "('%s') to another pipeline configuration in the project. By default, the data "
            "will be copied to the target config folder. If pass a --symlink parameter, it will "
            "create a symlink instead." % current_pc_name)

        log.info("")
        log.info("Your existing configuration will be backed up.")

        if use_symlink:
            log.info("")
            log.info("A symlink will be used.")

        log.info("")

        log.info(
            "The following pipeline configurations are available to push to:")
        path_hash = {}
        for pc in pipeline_configs:
            # skip self
            if pc["id"] == current_pc_id:
                continue
            local_path = pc.get(SG_LOCAL_STORAGE_OS_MAP[sys.platform])
            path_hash[pc["id"]] = local_path
            log.info(" - [%d] %s (%s)" % (pc["id"], pc["code"], local_path))
        log.info("")

        answer = raw_input(
            "Please type in the id of the configuration to push to (ENTER to exit): "
        )
        if answer == "":
            raise TankError("Aborted by user.")
        try:
            target_pc_id = int(answer)
        except ValueError:
            raise TankError("Please enter a number!")

        if target_pc_id not in [x["id"] for x in pipeline_configs]:
            raise TankError("Id was not found in the list!")

        target_pc_path = path_hash[target_pc_id]
        target_pc = PipelineConfiguration(target_pc_path)

        # check that both pcs are using the same core version
        target_core_version = target_pc.get_associated_core_version()
        source_core_version = self.tk.pipeline_configuration.get_associated_core_version(
        )

        if target_core_version != source_core_version:
            raise TankError(
                "The configuration you are pushing to is using Core API %s and "
                "the configuration you are pushing from is using Core API %s. "
                "This is not supported - before pushing the changes, make sure "
                "that both configurations are using the "
                "same Core API!" % (target_core_version, source_core_version))

        # check that there are no dev descriptors
        dev_desc = None
        for env_name in self.tk.pipeline_configuration.get_environments():
            env = self.tk.pipeline_configuration.get_environment(env_name)
            for eng in env.get_engines():
                desc = env.get_engine_descriptor(eng)
                if isinstance(desc, TankDevDescriptor):
                    dev_desc = desc
                    break
                for app in env.get_apps(eng):
                    desc = env.get_app_descriptor(eng, app)
                    if isinstance(desc, TankDevDescriptor):
                        dev_desc = desc
                        break
        if dev_desc:
            log.warning(
                "Looks like you have one or more dev locations set up in your "
                "configuration! We strongly recommend that you do not use dev locations "
                "in any production based configs. Dev descriptors are for development "
                "purposes only. You can easily switch a dev location using the "
                "'tank switch_app' command.")
            if not console_utils.ask_yn_question("Okay to proceed?"):
                raise TankError("Aborted.")

        date_suffix = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

        source_path = os.path.join(self.tk.pipeline_configuration.get_path(),
                                   "config")
        target_tmp_path = os.path.join(target_pc_path,
                                       "config.tmp.%s" % date_suffix)
        symlink_path = os.path.join(target_pc_path, "config.%s" % date_suffix)
        target_path = os.path.join(target_pc_path, "config")
        target_backup_path = os.path.join(target_pc_path,
                                          "config.bak.%s" % date_suffix)

        log.debug("Will push the config from %s to %s" %
                  (source_path, target_path))
        log.info("Hold on, pushing config...")

        ##########################################################################################
        # I/O phase
        old_umask = os.umask(0)
        try:

            # copy to temp location
            try:
                # copy everything!
                log.debug("Copying %s -> %s" % (source_path, target_tmp_path))
                util._copy_folder(log, source_path, target_tmp_path)

                # unlock and remove all the special core files from the tmp folder
                for core_file in CORE_API_FILES + CORE_PC_FILES:
                    path = os.path.join(target_tmp_path, "core", core_file)
                    if os.path.exists(path):
                        os.chmod(path, 0o666)
                        log.debug("Removing system file %s" % path)
                        os.remove(path)

                # copy the pc specific special core files from existing cfg to new cfg
                for core_file in CORE_PC_FILES:
                    curr_config_path = os.path.join(target_path, "core",
                                                    core_file)
                    new_config_path = os.path.join(target_tmp_path, "core",
                                                   core_file)
                    log.debug("Copying PC system file %s -> %s" %
                              (curr_config_path, new_config_path))
                    shutil.copy(curr_config_path, new_config_path)

            except Exception, e:
                raise TankError(
                    "Could not copy into temporary target folder '%s'. The target config "
                    "has not been altered. Check permissions and try again! "
                    "Error reported: %s" % (target_tmp_path, e))

            # backup original config
            try:
                if os.path.islink(target_path):
                    # if we are symlinked, no need to back up
                    # just delete the current symlink
                    os.remove(target_path)
                    created_backup = False
                else:
                    # move data to backup folder
                    shutil.move(target_path, target_backup_path)
                    created_backup = True
            except Exception, e:
                raise TankError(
                    "Could not move target folder from '%s' to '%s'. "
                    "Error reported: %s" %
                    (target_path, target_backup_path, e))
Example #37
                except Exception, e:
                    raise TankError(
                        "Could not move new config folder from '%s' to '%s' or create symlink."
                        "Error reported: %s" %
                        (target_tmp_path, symlink_path, e))
            else:
                try:
                    shutil.move(target_tmp_path, target_path)
                except Exception, e:
                    raise TankError(
                        "Could not move new config folder from '%s' to '%s'. "
                        "Error reported: %s" %
                        (target_tmp_path, target_path, e))

        finally:
            os.umask(old_umask)

        ##########################################################################################
        # Post Process Phase

        # now download all apps
        log.info("Checking if there are any apps that need downloading...")
        for env_name in target_pc.get_environments():
            env = target_pc.get_environment(env_name)
            for eng in env.get_engines():
                desc = env.get_engine_descriptor(eng)
                if not desc.exists_local():
                    log.info("Downloading Engine %s..." % eng)
                    desc.download_local()
                for app in env.get_apps(eng):
                    desc = env.get_app_descriptor(eng, app)
Example #38
    def daemonize(self):
        """
        do the UNIX double-fork magic, see W. Richard Stevens'
        "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177)
        https://web.archive.org/web/20070410070022/http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        # When dropping privs., verify the group and user exist before forking, so any error will get printed.
        if os.getuid() == 0:
            if self.group:
                try:
                    self.the_grp = grp.getgrnam(self.group)
                except Exception as ex:
                    msg = "failed to find group \"%s\"" % self.group
                    logging.critical(msg + " [%s]" % str(ex))
                    sys.stdout.write("  ..." + msg + "!\n")
                    sys.exit(1)
            if self.user:
                try:
                    self.the_pwd = pwd.getpwnam(self.user)
                except Exception as ex:
                    msg = "failed to find user \"%s\"" % self.user
                    logging.critical(msg + " [%s]" % str(ex))
                    sys.stdout.write("  ..." + msg + "!\n")
                    sys.exit(1)

        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            logging.critical("fork #1 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            logging.critical("fork #2 failed: %d (%s)\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        se = open(self.stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
        self.demote()
Example #39
def main():
    arguments = docopt(
        __doc__.format(
            default_config_dir=constants.config_dir,
            default_root=constants.install_root,
            default_repository=constants.repository_base,
            default_state_dir_root=constants.STATE_DIR_ROOT,
        ),
    )
    umask(0o022)

    # NOTE: Changing root or repository will likely break actually running packages.
    install = Install(
        os.path.abspath(arguments['--root']),
        os.path.abspath(arguments['--config-dir']),
        arguments['--rooted-systemd'],
        not arguments['--no-systemd'],
        not arguments['--no-block-systemd'],
        manage_users=True,
        add_users=not os.path.exists('/etc/mesosphere/manual_host_users'),
        manage_state_dir=True,
        state_dir_root=os.path.abspath(arguments['--state-dir-root']))

    repository = Repository(os.path.abspath(arguments['--repository']))

    try:
        if arguments['setup']:
            actions.setup(install, repository)
            sys.exit(0)

        if arguments['list']:
            print_repo_list(repository.list())
            sys.exit(0)

        if arguments['active']:
            for pkg in sorted(install.get_active()):
                print(pkg)
            sys.exit(0)

        if arguments['add']:
            actions.add_package_file(repository, arguments['<package-tarball>'])
            sys.exit(0)

        if arguments['fetch']:
            for package_id in arguments['<id>']:
                actions.fetch_package(
                    repository,
                    arguments['--repository-url'],
                    package_id,
                    os.getcwd())
            sys.exit(0)

        if arguments['activate']:
            actions.activate_packages(
                install,
                repository,
                arguments['<id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['swap']:
            actions.swap_active_package(
                install,
                repository,
                arguments['<package-id>'],
                not arguments['--no-systemd'],
                not arguments['--no-block-systemd'])
            sys.exit(0)

        if arguments['remove']:
            for package_id in arguments['<id>']:
                try:
                    actions.remove_package(install, repository, package_id)
                except PackageNotFound:
                    pass
            sys.exit(0)

        if arguments['uninstall']:
            uninstall(install, repository)
            sys.exit(0)

        if arguments['check']:
            checks = find_checks(install, repository)
            if arguments['--list']:
                list_checks(checks)
                sys.exit(0)
            # Run all checks
            sys.exit(run_checks(checks, install, repository))
    except ValidationError as ex:
        print("Validation Error: {0}".format(ex), file=sys.stderr)
        sys.exit(1)
    except PackageError as ex:
        print("Package Error: {0}".format(ex), file=sys.stderr)
        sys.exit(1)
    except Exception as ex:
        print("ERROR: {0}".format(ex), file=sys.stderr)
        sys.exit(1)

    print("unknown command", file=sys.stderr)
    sys.exit(1)
Example #40
    def __exit__(self, type, value, traceback):
        self._check_entered()

        try:
            # data_path refers to the externally used path to the params. It is a symlink.
            # old_data_path is the path currently pointed to by data_path.
            # tempdir_path is a path where the new params will go, which the new data path will point to.
            # new_data_path is a temporary symlink that will atomically overwrite data_path.
            #
            # The current situation is:
            #   data_path -> old_data_path
            # We're going to write params data to tempdir_path
            #   tempdir_path -> params data
            # Then point new_data_path to tempdir_path
            #   new_data_path -> tempdir_path
            # Then atomically overwrite data_path with new_data_path
            #   data_path -> tempdir_path
            old_data_path = None
            new_data_path = None
            tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)

            try:
                # Write back all keys.
                os.chmod(tempdir_path, 0o777)
                for k, v in self._vals.items():
                    with open(os.path.join(tempdir_path, k), "wb") as f:
                        f.write(v)
                        f.flush()
                        os.fsync(f.fileno())
                fsync_dir(tempdir_path)

                data_path = self._data_path()
                try:
                    old_data_path = os.path.join(self._path,
                                                 os.readlink(data_path))
                except (OSError, IOError):
                    # NOTE(mgraczyk): If other DB implementations have bugs, this could cause
                    #                 copies to be left behind, but we still want to overwrite.
                    pass

                new_data_path = "{}.link".format(tempdir_path)
                os.symlink(os.path.basename(tempdir_path), new_data_path)
                os.rename(new_data_path, data_path)
                fsync_dir(self._path)
            finally:
                # If the rename worked, we can delete the old data. Otherwise delete the new one.
                success = new_data_path is not None and os.path.exists(
                    data_path) and (os.readlink(data_path)
                                    == os.path.basename(tempdir_path))

                if success:
                    if old_data_path is not None:
                        shutil.rmtree(old_data_path)
                else:
                    shutil.rmtree(tempdir_path)

                # Regardless of what happened above, there should be no link at new_data_path.
                if new_data_path is not None and os.path.islink(new_data_path):
                    os.remove(new_data_path)
        finally:
            os.umask(self._prev_umask)
            self._prev_umask = None

            # Always release the lock.
            self._lock.release()
            self._lock = None
Example #41
sitePython = "@SITEPYTHON@"
if sitePython:
    sys.path.insert(0, "@SITEPYTHON@")

from DIRAC.Core.Base import Script
Script.parseCommandLine()

from DIRAC import gLogger
from DIRAC.Core.Utilities import DErrno

from DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper import JobWrapper, rescheduleFailedJob
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport

gJobReport = None

os.umask(0o22)


class JobWrapperError(Exception):
    """ Custom exception for handling JobWrapper "genuine" errors
  """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)


def execute(arguments):
    """ The only real function executed here
  """
Example #42
def generate_template(obj, post_dict):
    '''run template .post script, generating a new file
    The script will run in the source dir (overlay tree) and
    it will run even in dry-run mode
    Returns: True or False on error
    '''

    # Note: this func modifies input parameter 'obj'
    # when it successfully generates output, it will change obj's paths
    # and it will be picked up again in overlay._walk_subtree()

    if synctool.lib.NO_POST:
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    if len(SINGLE_FILES) > 0 and obj.dest_path not in SINGLE_FILES:
        verbose('skipping template generation of %s' % obj.src_path)
        obj.ov_type = synctool.overlay.OV_IGNORE
        return True

    verbose('generating template %s' % obj.print_src())

    src_dir = os.path.dirname(obj.src_path)
    newname = os.path.join(src_dir, os.path.basename(obj.dest_path))
    template = newname + '._template'
    # add most important extension
    newname += '._' + synctool.param.NODENAME

    verbose('generating template as %s' % newname)

    statbuf = synctool.syncstat.SyncStat(newname)
    if statbuf.exists():
        verbose('template destination %s already exists' % newname)

        if synctool.param.SYNC_TIMES:
            # copy source timestamps of template first
            shutil.copystat(obj.src_path, newname)

        # modify the object; set new src and dest filenames
        # later, visit() will call obj.make(), which will make full paths
        obj.src_path = newname
        obj.dest_path = os.path.basename(obj.dest_path)
        return True

    # get the .post script for the template file
    if template not in post_dict:
        if synctool.param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'no .post %s' % obj.src_path)
        else:
            error('template generator for %s not found' % obj.src_path)
        return False

    generator = post_dict[template]

    # chdir to source directory
    # Note: the change dir is not really needed
    # but the documentation promises that .post scripts run in
    # the dir where the new file will be put
    verbose('  os.chdir(%s)' % src_dir)
    unix_out('cd %s' % src_dir)
    cwd = os.getcwd()
    try:
        os.chdir(src_dir)
    except OSError as err:
        if synctool.param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % src_dir)
        else:
            error('failed to change directory to %s: %s' %
                  (src_dir, err.strerror))
        return False

    # temporarily restore original umask
    # so the script runs with the umask set by the sysadmin
    os.umask(synctool.param.ORIG_UMASK)

    # run the script
    # pass template and newname as "$1" and "$2"
    cmd_arr = [generator, obj.src_path, newname]
    verbose('  os.system(%s, %s, %s)' %
            (prettypath(cmd_arr[0]), cmd_arr[1], cmd_arr[2]))
    unix_out('# run command %s' % os.path.basename(cmd_arr[0]))

    have_error = False
    if synctool.lib.exec_command(cmd_arr) == -1:
        have_error = True

    statbuf = synctool.syncstat.SyncStat(newname)
    if not statbuf.exists():
        if not have_error:
            if synctool.param.TERSE:
                terse(synctool.lib.TERSE_WARNING, 'no output %s' % newname)
            else:
                warning('expected output %s was not generated' % newname)
            obj.ov_type = synctool.overlay.OV_IGNORE
        else:
            # an error message was already printed when exec() failed earlier
            # so, only when --verbose is used, print additional debug info
            verbose('error: expected output %s was not generated' % newname)
    else:
        verbose('found generated output %s' % newname)

    os.umask(0o077)

    # chdir back to original location
    verbose('  os.chdir(%s)' % cwd)
    unix_out('cd %s' % cwd)
    try:
        os.chdir(cwd)
    except OSError as err:
        if synctool.param.TERSE:
            terse(synctool.lib.TERSE_ERROR, 'chdir %s' % cwd)
        else:
            error('failed to change directory to %s: %s' % (cwd, err.strerror))
        return False

    if have_error:
        return False

    if synctool.param.SYNC_TIMES:
        # copy source timestamps of template first
        shutil.copystat(obj.src_path, newname)
        verbose('copying timestamp %s => %s' % (obj.src_path, newname))

    # modify the object; set new src and dest filenames
    # later, visit() will call obj.make(), which will make full paths
    obj.src_path = newname
    obj.dest_path = os.path.basename(obj.dest_path)
    return True
Example #43
def create_folder(path):
    if not os.path.exists(path):
        os.umask(0)  # clear the umask so the mode below is not restricted
        os.makedirs(path, 0o755)  # create the folder with explicit 0o755 permissions
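Note that create_folder clears the umask and never restores it, which silently affects every file the process creates afterwards. A small context manager (a hypothetical helper, not part of the original snippet) keeps the change scoped:

import os
from contextlib import contextmanager

@contextmanager
def scoped_umask(mask):
    # os.umask installs the new mask and returns the previous one
    prev = os.umask(mask)
    try:
        yield
    finally:
        os.umask(prev)

# usage: the 0o755 mode is applied exactly, and the old umask comes back
with scoped_umask(0):
    os.makedirs("some/path", 0o755, exist_ok=True)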
Example #44
    def uploadFile(self, resourceType, currentFolder):
        """
		Purpose: command to upload files to server (same as FileUpload)
		"""
        errorNo = 0
        if "NewFile" in self.request:
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileName = sanitizeFileName(newFileName)
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]

            if (allowedExtensions):
                # Check for allowed
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True

            if (isAllowed):
                # Upload to operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder,
                                                    currentFolder)
                i = 0
                while (True):
                    newFilePath = os.path.join(currentFolderPath, newFileName)
                    if os.path.exists(newFilePath):
                        i += 1
                        newFileName = "%s(%d).%s" % (newFileNameOnly, i,
                                                     newFileExtension)
                        errorNo = 201  # file renamed
                    else:
                        # Read file contents and write to the desired path (similar to php's move_uploaded_file)
                        fout = open(newFilePath, 'wb')
                        while (True):
                            chunk = newFile.file.read(100000)
                            if not chunk: break
                            fout.write(chunk)
                        fout.close()

                        if os.path.exists(newFilePath):
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError:  #ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0o755
                            if (doChmod):
                                oldumask = os.umask(0)
                                os.chmod(newFilePath, permissions)
                                os.umask(oldumask)

                        newFileUrl = combinePaths(self.webUserFilesFolder,
                                                  currentFolder) + newFileName

                        return self.sendUploadResults(errorNo, newFileUrl,
                                                      newFileName)
            else:
                return self.sendUploadResults(errorNo=202, customMsg="")
        else:
            return self.sendUploadResults(errorNo=202, customMsg="No File")
    """

    # pylint: disable=E1101
    # Make a non-session-leader child process
    try:
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    os.setsid()  # @UndefinedVariable - only available in UNIX

    # Make sure I can read my own files and shut out others
    prev = os.umask(0)
    os.umask(prev and int('077', 8))

    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Write pid
    if sickbeard.CREATEPID:
        pid = str(os.getpid())
        logger.log(u"Writing PID: " + pid + " to " + str(sickbeard.PIDFILE))
Example #46
def grains(opts, force_refresh=False):
    '''
    Return the functions for the dynamic grains and the values for the static
    grains.

    .. code-block:: python

        import salt.config
        import salt.loader

        __opts__ = salt.config.minion_config('/etc/salt/minion')
        __grains__ = salt.loader.grains(__opts__)
        print __grains__['id']
    '''
    # if we have no grains, let's try loading them from disk (TODO: move to decorator?)
    if not force_refresh:
        if opts.get('grains_cache', False):
            cfn = os.path.join(opts['cachedir'], 'grains.cache.p')
            if os.path.isfile(cfn):
                grains_cache_age = int(time.time() - os.path.getmtime(cfn))
                if opts.get('grains_cache_expiration', 300) >= grains_cache_age and not \
                        opts.get('refresh_grains_cache', False) and not force_refresh:
                    log.debug('Retrieving grains from cache')
                    try:
                        serial = salt.payload.Serial(opts)
                        with salt.utils.fopen(cfn, 'rb') as fp_:
                            cached_grains = serial.load(fp_)
                        return cached_grains
                    except (IOError, OSError):
                        pass
                else:
                    if force_refresh:
                        log.debug(
                            'Grains refresh requested. Refreshing grains.')
                    else:
                        log.debug(
                            'Grains cache last modified {0} seconds ago and '
                            'cache expiration is set to {1}. '
                            'Grains cache expired. Refreshing.'.format(
                                grains_cache_age,
                                opts.get('grains_cache_expiration', 300)))
            else:
                log.debug('Grains cache file does not exist.')

    if opts.get('skip_grains', False):
        return {}
    if 'conf_file' in opts:
        pre_opts = {}
        pre_opts.update(
            salt.config.load_config(
                opts['conf_file'], 'SALT_MINION_CONFIG',
                salt.config.DEFAULT_MINION_OPTS['conf_file']))
        default_include = pre_opts.get('default_include',
                                       opts['default_include'])
        include = pre_opts.get('include', [])
        pre_opts.update(
            salt.config.include_config(default_include,
                                       opts['conf_file'],
                                       verbose=False))
        pre_opts.update(
            salt.config.include_config(include,
                                       opts['conf_file'],
                                       verbose=True))
        if 'grains' in pre_opts:
            opts['grains'] = pre_opts['grains']
        else:
            opts['grains'] = {}
    else:
        opts['grains'] = {}

    grains_data = {}
    funcs = LazyLoader(
        _module_dirs(opts, 'grains', 'grain', ext_type_dirs='grains_dirs'),
        opts,
        tag='grains',
    )
    if force_refresh:  # if we refresh, lets reload grain modules
        funcs.clear()
    # Run core grains
    for key, fun in six.iteritems(funcs):
        if not key.startswith('core.'):
            continue
        ret = fun()
        if not isinstance(ret, dict):
            continue
        grains_data.update(ret)

    # Run the rest of the grains
    for key, fun in six.iteritems(funcs):
        if key.startswith('core.') or key == '_errors':
            continue
        try:
            ret = fun()
        except Exception:
            log.critical('Failed to load grains defined in grain file {0} in '
                         'function {1}, error:\n'.format(key, fun),
                         exc_info=True)
            continue
        if not isinstance(ret, dict):
            continue
        grains_data.update(ret)

    # Write cache if enabled
    if opts.get('grains_cache', False):
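        # 0o77 masks all group/other permission bits, so the cache file
        # written below is readable and writable by the owner only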
        cumask = os.umask(0o77)
        try:
            if salt.utils.is_windows():
                # Make sure cache file isn't read-only
                __salt__['cmd.run']('attrib -R "{0}"'.format(cfn))
            with salt.utils.fopen(cfn, 'w+b') as fp_:
                try:
                    serial = salt.payload.Serial(opts)
                    serial.dump(grains_data, fp_)
                except TypeError:
                    # Can't serialize pydsl
                    pass
        except (IOError, OSError):
            msg = 'Unable to write to grains cache file {0}'
            log.error(msg.format(cfn))
        os.umask(cumask)

    grains_data.update(opts['grains'])
    return grains_data
Example #47
    def _setup_devices(self):
        if self.config['internal_dev_setup']:
            util.rmtree(self.make_chroot_path("dev"),
                        selinux=self.selinux,
                        exclude=self.mounts.get_mountpoints())
            util.mkdirIfAbsent(self.make_chroot_path("dev", "pts"))
            util.mkdirIfAbsent(self.make_chroot_path("dev", "shm"))
            prevMask = os.umask(0o000)
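            # with the umask cleared, os.mknod below applies the modes in
            # devFiles exactly as listed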
            devFiles = [
                (stat.S_IFCHR | 0o666, os.makedev(1, 3), "dev/null"),
                (stat.S_IFCHR | 0o666, os.makedev(1, 7), "dev/full"),
                (stat.S_IFCHR | 0o666, os.makedev(1, 5), "dev/zero"),
                (stat.S_IFCHR | 0o666, os.makedev(1, 8), "dev/random"),
                (stat.S_IFCHR | 0o444, os.makedev(1, 9), "dev/urandom"),
                (stat.S_IFCHR | 0o666, os.makedev(5, 0), "dev/tty"),
                (stat.S_IFCHR | 0o600, os.makedev(5, 1), "dev/console"),
                (stat.S_IFCHR | 0o666, os.makedev(5, 2), "dev/ptmx"),
                (stat.S_IFCHR | 0o666, os.makedev(10, 237), "dev/loop-control"),
                (stat.S_IFBLK | 0o666, os.makedev(7, 0), "dev/loop0"),
                (stat.S_IFBLK | 0o666, os.makedev(7, 1), "dev/loop1"),
                (stat.S_IFBLK | 0o666, os.makedev(7, 2), "dev/loop2"),
                (stat.S_IFBLK | 0o666, os.makedev(7, 3), "dev/loop3"),
                (stat.S_IFBLK | 0o666, os.makedev(7, 4), "dev/loop4"),
            ]
            kver = os.uname()[2]
            self.root_log.debug("kernel version == %s", kver)
            for i in devFiles:
                # create node
                os.mknod(self.make_chroot_path(i[2]), i[0], i[1])
                # set context. (only necessary if host running selinux enabled.)
                # fails gracefully if chcon not installed.
                if self.selinux:
                    util.do(["chcon", "--reference=/" + i[2],
                             self.make_chroot_path(i[2])],
                            raiseExc=0, shell=False, env=self.env)

            os.symlink("/proc/self/fd/0", self.make_chroot_path("dev/stdin"))
            os.symlink("/proc/self/fd/1", self.make_chroot_path("dev/stdout"))
            os.symlink("/proc/self/fd/2", self.make_chroot_path("dev/stderr"))

            if os.path.isfile(self.make_chroot_path('etc', 'mtab')) or \
               os.path.islink(self.make_chroot_path('etc', 'mtab')):
                os.remove(self.make_chroot_path('etc', 'mtab'))
            os.symlink("../proc/self/mounts",
                       self.make_chroot_path('etc', 'mtab'))

            os.chown(self.make_chroot_path('dev/tty'),
                     pwd.getpwnam('root')[2],
                     grp.getgrnam('tty')[2])
            os.chown(self.make_chroot_path('dev/ptmx'),
                     pwd.getpwnam('root')[2],
                     grp.getgrnam('tty')[2])

            # symlink /dev/fd in the chroot for everything except RHEL4
            if util.cmpKernelVer(kver, '2.6.9') > 0:
                os.symlink("/proc/self/fd", self.make_chroot_path("dev/fd"))

            os.umask(prevMask)

            if util.cmpKernelVer(kver, '2.6.18') >= 0:
                os.unlink(self.make_chroot_path('/dev/ptmx'))
                os.symlink("pts/ptmx", self.make_chroot_path('/dev/ptmx'))
Example #48
    def save(self, data, jobid):
        global DEFAULT_TAG
        global PASSPHRASEENTRY
        global TEMPSTEGOIMG

        steghideOutput = True
        srcpathimage = self.get_img()

        try:
            shutil.copy2(srcpathimage, TEMPSTEGOIMG)
            os.remove(srcpathimage)

            tmpdir = tempfile.mkdtemp()
            predictable_filename = 'tempfile'
            # Ensure the file is read/write by the creator only
            saved_umask = os.umask(0o077)
            pathimplantoutput = os.path.join(tmpdir, predictable_filename)

            try:
                with open(pathimplantoutput, "w") as tmp:
                    tmp.write(str(data))
                    tmp.close()

                    process = subprocess.Popen([
                        'steghide', 'embed', '-p', PASSPHRASEENTRY, '-q', '-f',
                        '-ef', pathimplantoutput, '-cf', TEMPSTEGOIMG
                    ],
                                               stderr=subprocess.STDOUT,
                                               stdout=subprocess.PIPE)
                    out, err = process.communicate()
                    if out:
                        print(out)
                        if ("steghide:" in out):
                            # steghide reported an error
                            steghideOutput = False
                    if err:
                        print(err)

            except IOError:
                print('IOError')
                os.remove(pathimplantoutput)
            else:
                os.remove(pathimplantoutput)
            finally:
                os.umask(saved_umask)
                os.rmdir(tmpdir)

        except:
            print((colored('[-] Error saving image', 'yellow')))

        # Upload img downloaded in cloud service
        if (os.path.isfile(TEMPSTEGOIMG) and steghideOutput):

            try:
                print((colored('[+] Uploaded image to Cloud Service',
                               'white')))
                jobidmaster = "master_" + jobid
                response = upload(
                    TEMPSTEGOIMG,
                    tags=DEFAULT_TAG,
                    public_id=jobidmaster,
                )

            except:
                print((colored('[-] Cloud Service error', 'yellow')))
                return False
            finally:
                if (os.path.isfile(TEMPSTEGOIMG)):
                    os.remove(TEMPSTEGOIMG)
        else:
            return False

        return steghideOutput
Example #49
    def connectionLost(self, reason):
        """
        FIXME: this method is called 4 times on logout....
        it's called once from Avatar.closed() if disconnected
        """
        if self.stdinlogOpen:
            try:
                with open(self.stdinlogFile, 'rb') as f:
                    shasum = hashlib.sha256(f.read()).hexdigest()
                    shasumfile = os.path.join(self.downloadPath, shasum)
                    if os.path.exists(shasumfile):
                        os.remove(self.stdinlogFile)
                        log.msg("Duplicate stdin content {}".format(shasum))
                    else:
                        os.rename(self.stdinlogFile, shasumfile)

                log.msg(
                    eventid='cowrie.session.file_download',
                    format=
                    'Saved stdin contents with SHA-256 %(shasum)s to %(outfile)s',
                    url='stdin',
                    outfile=shasumfile,
                    shasum=shasum,
                    destfile='')
            except IOError:
                pass
            finally:
                self.stdinlogOpen = False

        if self.redirFiles:
            for rp in self.redirFiles:

                rf = rp[0]

                if rp[1]:
                    url = rp[1]
                else:
                    url = rf[rf.find('redir_') + len('redir_'):]

                try:
                    if not os.path.exists(rf):
                        continue

                    if os.path.getsize(rf) == 0:
                        os.remove(rf)
                        continue

                    with open(rf, 'rb') as f:
                        shasum = hashlib.sha256(f.read()).hexdigest()
                        shasumfile = os.path.join(self.downloadPath, shasum)
                        if os.path.exists(shasumfile):
                            os.remove(rf)
                            log.msg(
                                "Duplicate redir content with hash {}".format(
                                    shasum))
                        else:
                            os.rename(rf, shasumfile)
                    log.msg(
                        eventid='cowrie.session.file_download',
                        format=
                        'Saved redir contents with SHA-256 %(shasum)s to %(outfile)s',
                        url=url,
                        outfile=shasumfile,
                        shasum=shasum,
                        destfile=url)
                except IOError:
                    pass
            self.redirFiles.clear()

        if self.ttylogEnabled and self.ttylogOpen:
            ttylog.ttylog_close(self.ttylogFile, time.time())
            self.ttylogOpen = False
            shasum = ttylog.ttylog_inputhash(self.ttylogFile)
            shasumfile = os.path.join(self.ttylogPath, shasum)

            if os.path.exists(shasumfile):
                log.msg("Duplicate TTY log with hash {}".format(shasum))
                os.remove(self.ttylogFile)
            else:
                os.rename(self.ttylogFile, shasumfile)
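                # read the current umask without changing it: set it to 0,
                # grab the old value, then immediately restore it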
                umask = os.umask(0)
                os.umask(umask)
                os.chmod(shasumfile, 0o666 & ~umask)

            log.msg(
                eventid='cowrie.log.closed',
                format='Closing TTY Log: %(ttylog)s after %(duration)d seconds',
                ttylog=shasumfile,
                size=self.ttylogSize,
                shasum=shasum,
                duration=time.time() - self.startTime)

        insults.ServerProtocol.connectionLost(self, reason)
Example #50
def umask():
    """Clear the process umask and return the previous value."""
    return os.umask(0)
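Note that this helper is destructive: os.umask(0) returns the previous mask but leaves the process mask cleared. A side-effect-free read looks like this (a sketch, not part of the example):

import os

def current_umask():
    # os.umask has no pure getter; set a throwaway mask, capture
    # the old value, and put it straight back
    mask = os.umask(0)
    os.umask(mask)
    return mask

print(oct(current_umask()))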
Example #51
def _do_clone(log, tk, source_pc_id, user_id, new_name, target_linux,
              target_mac, target_win):
    """
    Clones the current configuration
    """

    curr_os = {
        "linux2": "linux_path",
        "win32": "windows_path",
        "darwin": "mac_path"
    }[sys.platform]
    source_pc = tk.shotgun.find_one(
        constants.PIPELINE_CONFIGURATION_ENTITY, [["id", "is", source_pc_id]],
        ["code", "project", "linux_path", "windows_path", "mac_path"])
    source_folder = source_pc.get(curr_os)

    target_folder = {
        "linux2": target_linux,
        "win32": target_win,
        "darwin": target_mac
    }[sys.platform]

    log.debug("Cloning %s -> %s" % (source_folder, target_folder))

    if not os.path.exists(source_folder):
        raise TankError("Cannot clone! Source folder '%s' does not exist!" %
                        source_folder)

    if os.path.exists(target_folder):
        raise TankError("Cannot clone! Target folder '%s' already exists!" %
                        target_folder)

    # copy files and folders across
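    # clear the umask so the explicit 0o777/0o666 modes below apply as-is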
    old_umask = os.umask(0)
    try:
        os.mkdir(target_folder, 0o777)
        os.mkdir(os.path.join(target_folder, "cache"), 0o777)
        util._copy_folder(log, os.path.join(source_folder, "config"),
                          os.path.join(target_folder, "config"))
        util._copy_folder(log, os.path.join(source_folder, "install"),
                          os.path.join(target_folder, "install"))
        shutil.copy(os.path.join(source_folder, "tank"),
                    os.path.join(target_folder, "tank"))
        shutil.copy(os.path.join(source_folder, "tank.bat"),
                    os.path.join(target_folder, "tank.bat"))
        os.chmod(os.path.join(target_folder, "tank.bat"), 0777)
        os.chmod(os.path.join(target_folder, "tank"), 0777)

        sg_code_location = os.path.join(target_folder, "config", "core",
                                        "install_location.yml")
        if os.path.exists(sg_code_location):
            os.chmod(sg_code_location, 0o666)
            os.remove(sg_code_location)
        fh = open(sg_code_location, "wt")
        fh.write("# Shotgun Pipeline Toolkit configuration file\n")
        fh.write("# This file was automatically created by tank clone\n")
        fh.write(
            "# This file reflects the paths in the pipeline configuration\n")
        fh.write("# entity which is associated with this location (%s).\n" %
                 new_name)
        fh.write("\n")
        fh.write("Windows: '%s'\n" % target_win)
        fh.write("Darwin: '%s'\n" % target_mac)
        fh.write("Linux: '%s'\n" % target_linux)
        fh.write("\n")
        fh.write("# End of file.\n")
        fh.close()

    except Exception as e:
        raise TankError("Could not create file system structure: %s" % e)
Example #52
def daemonize(pid_file=None):
    """
    Daemonize the current process. Returns the PID of the continuing child
    process. As an extra option, the PID of the child process can be written to
    a specified PID file.

    Note that parent process ends with `os._exit` instead of `sys.exit`. The
    first will not trigger any cleanups that may have been set. These are left
    for the child process that continues.

    :param str pid_file: Path to PID file to write process ID into. Must be in
                         a writeable folder. If left `None`, no file will be
                         written.
    :return: Process ID
    :rtype: int
    """

    # Dependency check to make sure the imports are OK. Saves you from a lot of
    # debugging trouble when you forget to import them.
    assert atexit.register and os.fork and sys.stdout and gc.collect

    # Force cleanup old resources to minimize the risk of sharing them.
    gc.collect()

    # First fork
    try:
        if os.fork() > 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("Unable to fork: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Decouple from parent
    os.setsid()
    os.umask(0)

    # Second fork
    try:
        if os.fork() > 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("Unable to fork: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Redirect file descriptors
    sys.stdout.flush()
    sys.stderr.flush()

    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")

    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())

    # Write PID file
    if pid_file:
        atexit.register(os.remove, pid_file)

        with open(pid_file, "w+") as fp:
            fp.write("%d" % os.getpid())

    # Return the PID
    return os.getpid()
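A minimal usage sketch (the paths are illustrative only):

if __name__ == "__main__":
    pid = daemonize(pid_file="/tmp/example-daemon.pid")
    # from here on we are running in the detached child process
    with open("/tmp/example-daemon.log", "a") as log:
        log.write("daemon running as pid %d\n" % pid)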
Example #53
    def handle(self, *args, **options):  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        os.umask(0o000)

        start = options['start']
        if start is None:
            start = (timezone.now() -
                     datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        end = options['end']

        if end is None:
            end = timezone.now().strftime('%Y-%m-%d')

        start_date = arrow.get(start).replace(hour=0, minute=0,
                                              second=0).to(settings.TIME_ZONE)
        end_date = arrow.get(end).replace(hour=0, minute=0,
                                          second=0).to(settings.TIME_ZONE)

        redo_dates = []

        sources = DataPoint.objects.filter(
            generator_identifier='pdk-withings-server-auth').order_by(
                'source').values_list('source', flat=True).distinct()

        for source in sources:
            data_point = DataPoint.objects.filter(
                source=source,
                generator_identifier='pdk-withings-server-auth').order_by(
                    '-created').first()

            if data_point is not None:
                properties = data_point.fetch_properties()

                access_token = refresh_access_token(properties)

                index_date = start_date

                while index_date < end_date:
                    next_day = index_date.replace(days=+1)

                    try:
                        # print('FETCHING INTRADAY FOR ' + source + ': ' + str(index_date) + ': ' + str(next_day))

                        fetch_intraday(data_point.source, access_token,
                                       index_date, next_day)

                        time.sleep(1)

                        # print('FETCHING SLEEP MEASURES FOR ' + source + ': ' + str(index_date) + ': ' + str(next_day))

                        fetch_sleep_measures(data_point.source, access_token,
                                             index_date, next_day)

                        time.sleep(1)
                    except requests.exceptions.ReadTimeout:
                        problem_date = index_date.date()

                        if problem_date not in redo_dates:
                            redo_dates.append(problem_date)

                    index_date = next_day

        for redo_date in redo_dates:
            print('REDO DATE: ' + str(redo_date))
Example #54
import os
# os.system("python")
# os.environ()
var1 = os.getcwd()
print(var1)
var2 = os.getpid()
print(var2)

print(os.umask(0))
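One caveat about the last line: print(os.umask(0)) shows the previous mask, but leaves the process umask at 0 afterwards, and prints it in decimal. A follow-up that restores the mask and shows the conventional octal form:

old = os.umask(0)   # returns the previous mask while clearing it
os.umask(old)       # put it back
print(oct(old))     # e.g. 0o022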
Example #55
    def run(self):
        super(VaultCLI, self).run()
        loader = DataLoader()

        # set default restrictive umask
        old_umask = os.umask(0o077)

        vault_ids = list(context.CLIARGS['vault_ids'])

        # there are 3 types of actions: those that just 'read' (decrypt, view) and only
        # need to ask for a password once; those that 'write' (create, encrypt) and
        # ask for a new password and confirm it; and 'read/write' (rekey), which asks
        # for the old password, then asks for a new one and confirms it.

        default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
        vault_ids = default_vault_ids + vault_ids

        # TODO: instead of prompting for these before, we could let VaultEditor
        #       call a callback when it needs it.
        if self.action in ['decrypt', 'view', 'rekey', 'edit']:
            vault_secrets = self.setup_vault_secrets(
                loader,
                vault_ids=vault_ids,
                vault_password_files=list(
                    context.CLIARGS['vault_password_files']),
                ask_vault_pass=context.CLIARGS['ask_vault_pass'])
            if not vault_secrets:
                raise AnsibleOptionsError(
                    "A vault password is required to use Ansible's Vault")

        if self.action in ['encrypt', 'encrypt_string', 'create']:

            encrypt_vault_id = None
            # no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
            if self.action not in ['edit']:
                encrypt_vault_id = context.CLIARGS[
                    'encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY

            vault_secrets = None
            vault_secrets = \
                self.setup_vault_secrets(loader,
                                         vault_ids=vault_ids,
                                         vault_password_files=context.CLIARGS['vault_password_files'],
                                         ask_vault_pass=context.CLIARGS['ask_vault_pass'],
                                         create_new_password=True)

            if len(vault_secrets) > 1 and not encrypt_vault_id:
                raise AnsibleOptionsError(
                    "The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id"
                    % ','.join([x[0] for x in vault_secrets]))

            if not vault_secrets:
                raise AnsibleOptionsError(
                    "A vault password is required to use Ansible's Vault")

            encrypt_secret = match_encrypt_secret(
                vault_secrets, encrypt_vault_id=encrypt_vault_id)

            # only one secret for encrypt for now, use the first vault_id and use its first secret
            # TODO: exception if more than one?
            self.encrypt_vault_id = encrypt_secret[0]
            self.encrypt_secret = encrypt_secret[1]

        if self.action in ['rekey']:
            encrypt_vault_id = context.CLIARGS[
                'encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
            # print('encrypt_vault_id: %s' % encrypt_vault_id)
            # print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)

            # new_vault_ids should only ever be one item, from
            # load the default vault ids if we are using encrypt-vault-id
            new_vault_ids = []
            if encrypt_vault_id:
                new_vault_ids = default_vault_ids
            if context.CLIARGS['new_vault_id']:
                new_vault_ids.append(context.CLIARGS['new_vault_id'])

            new_vault_password_files = []
            if context.CLIARGS['new_vault_password_file']:
                new_vault_password_files.append(
                    context.CLIARGS['new_vault_password_file'])

            new_vault_secrets = \
                self.setup_vault_secrets(loader,
                                         vault_ids=new_vault_ids,
                                         vault_password_files=new_vault_password_files,
                                         ask_vault_pass=context.CLIARGS['ask_vault_pass'],
                                         create_new_password=True)

            if not new_vault_secrets:
                raise AnsibleOptionsError(
                    "A new vault password is required to use Ansible's Vault rekey"
                )

            # There is only one new_vault_id currently and one new_vault_secret, or we
            # use the id specified in --encrypt-vault-id
            new_encrypt_secret = match_encrypt_secret(
                new_vault_secrets, encrypt_vault_id=encrypt_vault_id)

            self.new_encrypt_vault_id = new_encrypt_secret[0]
            self.new_encrypt_secret = new_encrypt_secret[1]

        loader.set_vault_secrets(vault_secrets)

        # FIXME: do we need to create VaultEditor here? it's not reused
        vault = VaultLib(vault_secrets)
        self.editor = VaultEditor(vault)

        self.execute()

        # and restore umask
        os.umask(old_umask)
Example #56
        fh.write(
            "# This file reflects the paths in the pipeline configuration\n")
        fh.write("# entity which is associated with this location (%s).\n" %
                 new_name)
        fh.write("\n")
        fh.write("Windows: '%s'\n" % target_win)
        fh.write("Darwin: '%s'\n" % target_mac)
        fh.write("Linux: '%s'\n" % target_linux)
        fh.write("\n")
        fh.write("# End of file.\n")
        fh.close()

    except Exception as e:
        raise TankError("Could not create file system structure: %s" % e)
    finally:
        os.umask(old_umask)

    # finally register with shotgun
    data = {
        "linux_path": target_linux,
        "windows_path": target_win,
        "mac_path": target_mac,
        "code": new_name,
        "project": source_pc["project"],
        "users": [{
            "type": "HumanUser",
            "id": user_id
        }]
    }
    log.debug("Create sg: %s" % str(data))
    pc_entity = tk.shotgun.create(constants.PIPELINE_CONFIGURATION_ENTITY, data)
Example #57
    def execute(self, items, preview_mode, **kwargs):
        """
        Creates a list of files and folders.

        The default implementation creates files and folders recursively using
        open permissions.

        :param list(dict): List of actions that needs to take place.

        Six different types of actions are supported.

        **Standard Folder**

        This represents a standard folder in the file system which is not associated
        with anything in Shotgun. It contains the following keys:

        - **action** (:class:`str`) - ``folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item

        **Entity Folder**

        This represents a folder in the file system which is associated with a
        Shotgun entity. It contains the following keys:

        - **action** (:class:`str`) - ``entity_folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item
        - **entity** (:class:`dict`) - Shotgun entity link with keys ``type``, ``id`` and ``name``.

        **Remote Entity Folder**

        This is the same as an entity folder, except that it was originally
        created in another location. A remote folder request means that your
        local toolkit instance has detected that folders have been created by
        a different file system setup. It contains the following keys:

        - **action** (:class:`str`) - ``remote_entity_folder``
        - **metadata** (:class:`dict`) - The configuration yaml data for this item
        - **path** (:class:`str`) - path on disk to the item
        - **entity** (:class:`dict`) - Shotgun entity link with keys ``type``, ``id`` and ``name``.

        **File Copy**

        This represents a file copy operation which should be carried out.
        It contains the following keys:

        - **action** (:class:`str`) - ``copy``
        - **metadata** (:class:`dict`) - The configuration yaml data associated with the directory level
          on which this object exists.
        - **source_path** (:class:`str`) - location of the file that should be copied
        - **target_path** (:class:`str`) - target location to where the file should be copied.

        **File Creation**

        This is similar to the file copy, but instead of a source path, a chunk
        of data is specified. It contains the following keys:

        - **action** (:class:`str`) - ``create_file``
        - **metadata** (:class:`dict`) - The configuration yaml data associated with the directory level
          on which this object exists.
        - **content** (:class:`str`) -- file content
        - **target_path** (:class:`str`) -- target location to where the file should be copied.

        **Symbolic Links**

        This represents a request that a symbolic link is created. Note that symbolic links are not
        supported in the same way on all operating systems. The default hook therefore does not
        implement symbolic link support on Windows systems. If you want to add symbolic link support
        on windows, simply copy this hook to your project configuration and make the necessary
        modifications.

        - **action** (:class:`str`) - ``symlink``
        - **metadata** (:class:`dict`) - The raw configuration yaml data associated with symlink yml config file.
        - **path** (:class:`str`) - the path to the symbolic link
        - **target** (:class:`str`) - the target to which the symbolic link should point

        :returns: List of files and folders that have been created.
        :rtype: list(str)
        """

        # set the umask so that we get true permissions
        old_umask = os.umask(0)
        locations = []
        try:

            # loop through our list of items
            for i in items:

                action = i.get("action")

                if action in ["entity_folder", "folder"]:
                    # folder creation
                    path = i.get("path")
                    if not os.path.exists(path):
                        if not preview_mode:
                            # create the folder using open permissions
                            try:
                                os.makedirs(path, 0o777)
                            except:
                                print("NO PERMISIONS TO CREATE FOLDER: ", path)
                        locations.append(path)

                elif action == "remote_entity_folder":
                    # Remote folder creation
                    #
                    # NOTE! This action happens when another user has created
                    # a folder on their machine and we are syncing our local path
                    # cache to be aware of this folder's existence.
                    #
                    # For a traditional setup, where the project storage is shared,
                    # there is no need to do I/O for remote folders - these folders
                    # have already been created on the remote storage so you have access
                    # to them already.
                    #
                    # On a setup where each user or group of users is attached to
                    # different, independent file storages, which are synced,
                    # it may be meaningful to "replay" the remote folder creation
                    # on the local system. This would result in the same folder
                    # scaffold on each disk which is storing project data.
                    #
                    # path = i.get("path")
                    # if not os.path.exists(path):
                    #     if not preview_mode:
                    #         # create the folder using open permissions
                    #         os.makedirs(path, 0o777)
                    #     locations.append(path)
                    pass

                elif action == "symlink":
                    # symbolic link
                    if is_windows():
                        # no windows support
                        continue
                    path = i.get("path")
                    target = i.get("target")
                    # note use of lexists to check existence of symlink
                    # rather than what symlink is pointing at
                    if not os.path.lexists(path):
                        if not preview_mode:
                            os.symlink(target, path)
                        locations.append(path)

                elif action == "copy":
                    # a file copy
                    source_path = i.get("source_path")
                    target_path = i.get("target_path")
                    if not os.path.exists(target_path):
                        if not preview_mode:
                            # do a standard file copy
                            try:
                                shutil.copy(source_path, target_path)
                                # set permissions to open
                                os.chmod(target_path, 0o666)
                            except:
                                print("NO PERMISIONS TO COPY FILE: ",
                                      target_path)
                        locations.append(target_path)

                elif action == "create_file":
                    # create a new file based on content
                    path = i.get("path")
                    parent_folder = os.path.dirname(path)
                    content = i.get("content")
                    if not os.path.exists(parent_folder) and not preview_mode:
                        os.makedirs(parent_folder, 0o777)
                    if not os.path.exists(path):
                        if not preview_mode:
                            # create the file
                            try:
                                fp = open(path, "wb")
                                fp.write(six.ensure_binary(content))
                                fp.close()
                                # and set permissions to open
                                os.chmod(path, 0o666)
                            except:
                                print("NO PERMISSIONS TO CREATE FILE: ", path)
                        locations.append(path)

        finally:
            # reset umask
            os.umask(old_umask)

        return locations
Example #58
def tearDown(testobj):
    os.umask(testobj._old_umask)
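The matching setUp is not shown; presumably it stores the mask that tearDown restores. An assumed counterpart (the 0o022 value is arbitrary):

def setUp(testobj):
    # os.umask returns the previous mask while installing the new one,
    # which is exactly what tearDown needs later
    testobj._old_umask = os.umask(0o022)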
Example #59
def main():
    parser = argparse.ArgumentParser(description='save blog data')
    parser.add_argument('--name', '-n', type=str, default='')
    args = parser.parse_args()

    # convert us-ascii to utf-8
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')

    umask = os.umask(0)
    if not os.path.isdir('./members/'):
        os.mkdir('./members/', 0o777)
        #os.chmod('./members/', 0o777)
    os.umask(umask)

    url = "http://blog.nogizaka46.com/"
    name = ""
    today = datetime.now()
    year = today.strftime('%Y')
    month = today.strftime('%m')
    current_date = year + month
    date = current_date
    pageNum = 1
    #date = str(2012) + str(0) + str(3)

    if args.name == '':
        savlog_general(url)
    else:
        # map each member name to their blog path segment;
        # note: the trailing slashes are required for the URL join below
        member_map = {
            "秋元真夏": "manatsu.akimoto/",
            "生田絵梨花": "erika.ikuta/",
            "伊藤かりん": "karin.itou/",
            "伊藤純奈": "junna.itou/",
            "伊藤理々杏": "riria.ito/",
            "井上小百合": "sayuri.inoue/",
            "岩本蓮加": "renka.iwamoto/",
            "梅澤美波": "minami.umezawa/",
            "衛藤美彩": "misa.eto/",
            "大園桃子": "momoko.ozono/",
            "川後陽菜": "hina.kawago/",
            "北野日奈子": "hinako.kitano/",
            "久保史緒里": "shiori.kubo/",
            "齋藤飛鳥": "asuka.saito/",
            "斉藤優里": "yuuri.saito/",
            "阪口珠美": "tamami.sakaguchi/",
            "桜井玲香": "reika.sakurai/",
            "佐々木琴子": "kotoko.sasaki/",
            "佐藤楓": "kaede.sato/",
            "白石麻衣": "mai.shiraishi/",
            "新内眞衣": "mai.shinuchi/",
            "鈴木絢音": "ayane.suzuki/",
            "高山一実": "kazumi.takayama/",
            "寺田蘭世": "ranze.terada/",
            "中田花奈": "kana.nakada/",
            "中村麗乃": "reno.nakamura/",
            "西野七瀬": "nanase.nishino/",
            "能條愛未": "ami.noujo/",
            "樋口日奈": "hina.higuchi/",
            "星野みなみ": "minami.hoshino/",
            "堀未央奈": "miona.hori/",
            "松村沙友理": "sayuri.matsumura/",
            "向井葉月": "haduki.mukai/",
            "山崎怜奈": "rena.yamazaki/",
            "山下美月": "miduki.yamashita/",
            "吉田綾乃クリスティー": "ayano.christie.yoshida/",
            "与田祐希": "yuuki.yoda/",
            "若月佑美": "yumi.wakatsuki/",
            "渡辺みり愛": "miria.watanabe/",
            "和田まあや": "maaya.wada/",
        }
        if args.name in member_map:
            name = member_map[args.name]
        else:
            sys.stderr.write("There is no member! ")

    member_path = './members/' + name
    first_url = url + name
    roop_counter = 1
    fake_counter = 0

    while True:
        print('search date is ' + date)
        counter = 0
        current_url = url + name + "?d=" + date
        roop_counter += 1
        first_url_days = return_date(first_url)
        current_url_days = return_date(current_url)
        for i in range(len(current_url_days)):
            if first_url_days[i] == current_url_days[i]:
                counter += 1

        # this page is the same as first-url's page
        if counter == len(first_url_days) == len(current_url_days):
            counter = 0
            # just in case
            # suspicious_month = str(0) + str(int(month) - 1)
            # when searching January, check whether the previous December has blog posts
            if int(month) == 1:
                suspicious_month = str(12)
                suspicious_year = str(int(year) - 1)
            # otherwise there is no need to roll back into the previous year
            else:
                suspicious_month = (str(int(month) - 1) if int(month) > 10
                                    else str(0) + str(int(month) - 1))
                suspicious_year = year
            suspicious_date = suspicious_year + suspicious_month
            suspicious_url = url + name + "?d=" + suspicious_date
            suspicious_days = return_date(suspicious_url)
            print('suspicious date is ' + suspicious_date)
            for i in range(len(suspicious_days)):
                if first_url_days[i] == suspicious_days[i]:
                    counter += 1
            # same content for two consecutive months -> assume no older articles exist
            if counter == len(first_url_days) == len(suspicious_days):
                print('nothing!! do older articles exist?')
                fake_counter += 1
                if fake_counter < 12:
                    month = suspicious_month
                    year = suspicious_year
                    date = suspicious_date
                else:
                    break
            else:
                month = suspicious_month
                year = suspicious_year
                date = suspicious_date

        else:
            print('start')
            # processing when page number is 1
            search_url = url + name + "?d=" + date
            savlog_individual(search_url, member_path)
            pageNum += 1
            print('pagenum 0 finished!')

            next_year, next_month = next_pages(url, search_url, name,
                                               member_path, year, month, date,
                                               pageNum)
            year = next_year
            month = next_month
            date = year + month
            print('round ' + str(roop_counter) + ' finished!!')

    print('saving is finished!!')
Example #60
def daemonize(enable_stdio_inheritance=False):
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)

        os.umask(0o22)

        # In both of the following cases, any file descriptors above stdin,
        # stdout and stderr are left untouched. The inheritance
        # option simply allows one to have output go to a file
        # specified by way of shell redirection when not wanting
        # to use --error-log option.

        if not enable_stdio_inheritance:
            # Remap all of stdin, stdout and stderr on to
            # /dev/null. The expectation is that users have
            # specified the --error-log option.

            closerange(0, 3)

            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            if fd_null != 0:
                os.dup2(fd_null, 0)

            os.dup2(fd_null, 1)
            os.dup2(fd_null, 2)

        else:
            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            # Always redirect stdin to /dev/null as we would
            # never expect to need to read interactive input.

            if fd_null != 0:
                os.close(0)
                os.dup2(fd_null, 0)

            # If stdout and stderr are still connected to
            # their original file descriptors we check to see
            # if they are associated with terminal devices.
            # When they are, we map them to /dev/null so that they
            # are still properly detached from any controlling terminal.
            # If not, we preserve them as they are.
            #
            # If stdin and stdout were not hooked up to the
            # original file descriptors, then all bets are
            # off and all we can really do is leave them as
            # they were.
            #
            # This will allow 'gunicorn ... > output.log 2>&1'
            # to work with stdout/stderr going to the file
            # as expected.
            #
            # Note that if using --error-log option, the log
            # file specified through shell redirection will
            # only be used up until the log file specified
            # by the option takes over. As it replaces stdout
            # and stderr at the file descriptor level, then
            # anything using stdout or stderr, including having
            # cached a reference to them, will still work.

            def redirect(stream, fd_expect):
                try:
                    fd = stream.fileno()
                    if fd == fd_expect and stream.isatty():
                        os.close(fd)
                        os.dup2(fd_null, fd)
                except AttributeError:
                    pass

            redirect(sys.stdout, 1)
            redirect(sys.stderr, 2)