def clixxIOSetupSHM():
    """
    Sensor data is exchanged using shared memory on Linux. This method
    configures the shared memory space so that multiple clients can
    read/write to the space.
    """
    global clixxIOshmfd, clixxIOshmBuff

    if clixxIOshmfd is None:
        # Create a new empty file to back the memory map on disk
        if not os.path.exists(clixxIOshmPath):
            clixxIOshmfd = os.open(
                clixxIOshmPath, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
            # Zero out the file to ensure it's the right size
            os.write(clixxIOshmfd, b' ' * mmap.PAGESIZE)
            os.lseek(clixxIOshmfd, 0, os.SEEK_SET)
            os.write(clixxIOshmfd, b'\n')
        else:
            clixxIOshmfd = os.open(clixxIOshmPath, os.O_RDWR)

        # Create the mmap instance with the following params:
        # fd: file descriptor which backs the mapping, or -1 for an anonymous mapping
        # length: must be a multiple of PAGESIZE (usually 4 KB)
        # flags: MAP_SHARED means other processes can share this mmap
        # prot: PROT_WRITE means this process can write to this mmap
        clixxIOshmBuff = mmap.mmap(
            clixxIOshmfd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_WRITE)
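# A minimal companion sketch (not from the original module): once
# clixxIOSetupSHM() has run, any other process can map the same backing file
# and read what was written. The path argument stands in for the module-level
# clixxIOshmPath assumed above; PROT_READ suffices for a read-only client.
import mmap
import os

def read_shared_value(path):
    fd = os.open(path, os.O_RDONLY)
    try:
        buf = mmap.mmap(fd, mmap.PAGESIZE, mmap.MAP_SHARED, mmap.PROT_READ)
        try:
            return buf[:buf.find(b'\n')]  # everything up to the first newline
        finally:
            buf.close()
    finally:
        os.close(fd)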
def get_lock_file(lockfile):
    """
    Try to acquire a lock file.

    Returns (acquire, fd) where 'acquire' is True if the lock is acquired
    and 'fd' is the file descriptor of the lock file.
    """
    acquire = False
    while True:
        try:
            # try to create the file with exclusive access
            fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o600)
            # creation succeeded, we have the lock
            acquire = True
            break
        except OSError as e:
            if e.errno != errno.EEXIST:
                # unknown error, re-raise
                raise
        try:
            # lock already exists (i.e. held by someone else);
            # try to open it in read-only mode
            fd = os.open(lockfile, os.O_RDONLY)
            acquire = False
            break
        except OSError as e:
            if e.errno != errno.ENOENT:
                # unknown error, re-raise
                raise
            # the lock vanished between the two opens; retry
    return acquire, fd
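# A minimal usage sketch for get_lock_file (the path is illustrative; the
# release step assumes the holder removes the lock file when done, which the
# original may handle elsewhere).
import os

acquired, fd = get_lock_file('/tmp/myapp.lock')
if acquired:
    try:
        pass  # critical section: only one process gets here at a time
    finally:
        os.close(fd)
        os.unlink('/tmp/myapp.lock')  # let other processes acquire the lock
else:
    os.close(fd)  # lock held elsewhere; fd is the read-only descriptor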
def load(self, filename):
    """Optimized load and return the parsed version of filename.

    Uses the on-disk parse cache if the file is located in it.
    """
    # Compute the sha1 hash (cache key) of the file contents
    with open(filename) as fp:
        key = sha1(fp.read()).hexdigest()
    path = self.key_to_path(key)

    # Return the cached parse if available
    if key in self.hashes:
        try:
            with open(path) as fp:
                return cPickle.load(fp)
        except EOFError:
            os.unlink(path)
            self.hashes.remove(key)
        except IOError:
            self.hashes.remove(key)

    # Create the nested cache directory
    try:
        os.makedirs(os.path.dirname(path))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise

    # Process the file and save it in the cache
    scratch = kurt.Project.load(filename)  # can fail
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o400), 'w') as fp:
        # open the file for writing but make it immediately read-only
        cPickle.dump(scratch, fp, cPickle.HIGHEST_PROTOCOL)
def lock(self, path, exclusive=False, block=False):
    result = self._force
    fd = self._lock.get(path)
    if fd is None:
        # On Solaris, the target must be a plain file and must be writable
        # to take an exclusive lock
        if sys.platform[:5] == "sunos":
            sunpath = path
            if os.path.isdir(path):
                sunpath = path + "/.lock"
            if not os.path.exists(sunpath):
                fd = os.open(sunpath, os.O_WRONLY | os.O_CREAT)
                os.close(fd)
            fd = self._lock[path] = os.open(sunpath, os.O_RDWR)
        else:
            fd = self._lock[path] = os.open(path, os.O_RDONLY)
        flags = fcntl.fcntl(fd, fcntl.F_GETFD, 0)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    if exclusive:
        flags = fcntl.LOCK_EX
    else:
        flags = fcntl.LOCK_SH
    if not block:
        flags |= fcntl.LOCK_NB
    try:
        fcntl.flock(fd, flags)
        result = True
    except IOError:
        pass
    return result
def write_env_var_to_conf_dir(var, value, conf_dir):
    env_jsonfile_path = os.path.join(conf_dir, 'environment.json')
    if var in CORE_VAR_NAMES:
        try:
            with open(env_jsonfile_path) as fd:
                env_vars = json.load(fd)
        except Exception:
            env_vars = {}
        if value is None and var in env_vars:
            del env_vars[var]
        else:
            env_vars[var] = value
        # Make sure the file has 600 permissions
        try:
            os.remove(env_jsonfile_path)
        except Exception:
            pass
        with os.fdopen(os.open(env_jsonfile_path,
                               os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            json.dump(env_vars, fd, indent=4)
            fd.write("\n")
    else:
        # DX_CLI_WD, DX_USERNAME, DX_PROJECT_CONTEXT_NAME
        # Make sure the file has 600 permissions
        try:
            os.remove(os.path.join(conf_dir, var))
        except Exception:
            pass
        with os.fdopen(os.open(os.path.join(conf_dir, var),
                               os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            fd.write(value.encode(sys_encoding) if is_py2 else value)
    if not os.path.exists(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv'):
        with open(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv', 'w') as fd:
            for var in CORE_VAR_NAMES:
                fd.write('unset ' + var + '\n')
def daemonize():
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()
        if os.fork():
            os._exit(0)

        os.umask(0)
        maxfd = get_maxfd()

        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:  # fd wasn't open to begin with (ignored)
                pass

        os.open(REDIRECT_TO, os.O_RDWR)  # becomes fd 0 (stdin)
        os.dup2(0, 1)  # stdout
        os.dup2(0, 2)  # stderr
def os_open(*, flags, path=None, parent_fd=None, name=None, noatime=False):
    """
    Use os.open to open a filesystem item.

    If parent_fd and name are given, they are preferred and openat will be
    used; path is not used in this case.

    :param path: full (but not necessarily absolute) path
    :param parent_fd: open directory file descriptor
    :param name: name relative to parent_fd
    :param flags: open flags for os.open() (int)
    :param noatime: True if access time shall be preserved
    :return: file descriptor
    """
    if name and parent_fd is not None:
        # name is neither None nor empty, parent_fd given.
        fname = name  # use name relative to parent_fd
    else:
        fname, parent_fd = path, None  # just use the path
    _flags_normal = flags
    if noatime:
        _flags_noatime = _flags_normal | O_('NOATIME')
        try:
            # if we have O_NOATIME, this likely will succeed if we are root
            # or the owner of the file:
            fd = os.open(fname, _flags_noatime, dir_fd=parent_fd)
        except PermissionError:
            if _flags_noatime == _flags_normal:
                # we do not have O_NOATIME, no need to try again:
                raise
            # Was this EPERM due to the O_NOATIME flag? Try again without it:
            fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
    else:
        fd = os.open(fname, _flags_normal, dir_fd=parent_fd)
    return fd
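# A hedged usage sketch for os_open above: open a name relative to an
# already-open directory descriptor, asking for the access time to be
# preserved (the function falls back transparently when O_NOATIME is not
# permitted). The /tmp path and file name are illustrative only.
import os

dir_fd = os.open('/tmp', os.O_RDONLY)
try:
    fd = os_open(flags=os.O_RDWR | os.O_CREAT, parent_fd=dir_fd,
                 name='example.txt', noatime=True)
    os.close(fd)
finally:
    os.close(dir_fd)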
def run_captured(self, cmd):
    """Run a command, capturing stdout and stderr.

    Based in part on popen2.py.  Returns (waitstatus, stdout, stderr)."""
    import os, types
    pid = os.fork()
    if pid == 0:
        # child
        try:
            pid = os.getpid()
            openmode = os.O_WRONLY | os.O_CREAT | os.O_TRUNC

            outfd = os.open('%d.out' % pid, openmode, 0o666)
            os.dup2(outfd, 1)
            os.close(outfd)

            errfd = os.open('%d.err' % pid, openmode, 0o666)
            os.dup2(errfd, 2)
            os.close(errfd)

            if isinstance(cmd, types.StringType):
                cmd = ['/bin/sh', '-c', cmd]
            os.execvp(cmd[0], cmd)
        finally:
            os._exit(127)
    else:
        # parent
        exited_pid, waitstatus = os.waitpid(pid, 0)
        stdout = open('%d.out' % pid).read()
        stderr = open('%d.err' % pid).read()
        return waitstatus, stdout, stderr
def openlock(filename, operation, wait=True):
    """
    Returns a file-like object that gets an fcntl() lock.

    `operation` should be one of LOCK_SH or LOCK_EX for shared or exclusive
    locks. If `wait` is False, then openlock() will not block on trying to
    acquire the lock.
    """
    f = os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT, 0o666), "r+")
    if not wait:
        operation |= LOCK_NB
    try:
        lockf(f.fileno(), operation)
    except IOError as err:
        if not wait and err.errno in (EACCES, EAGAIN):
            from django.core.management.base import CommandError
            raise CommandError("Could not acquire lock on '%s' held by %s."
                               % (filename, f.readline().strip()))
        raise
    print("%s:%d" % (socket.gethostname(), os.getpid()), file=f)
    f.truncate()
    f.flush()
    return f
def create_daemon(_pidfile):
    global pidfile
    pidfile = _pidfile
    if os.path.isfile(pidfile):
        print('pid file [' + pidfile + '] still exists. please check your system.')
        os._exit(1)
    if not os.path.isdir(os.path.dirname(pidfile)):
        os.mkdir(os.path.dirname(pidfile))

    pid = os.fork()
    if pid == 0:
        os.setsid()
        with open(pidfile, 'w') as f:
            f.write(str(os.getpid()))
        os.chdir('/')
        os.umask(0)
    else:
        # parent goes bye bye
        os._exit(0)

    si = os.open('/dev/null', os.O_RDONLY)
    so = os.open('/dev/null', os.O_RDWR)
    se = os.open('/dev/null', os.O_RDWR)
    os.dup2(si, sys.stdin.fileno())
    os.dup2(so, sys.stdout.fileno())
    os.dup2(se, sys.stderr.fileno())
    os.close(si)
    os.close(so)
    os.close(se)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def record_site(self, job_status):
    """Record the job status per site in advisory statistics files."""
    job_status_name = None
    for name, code in JOB_RETURN_CODES._asdict().iteritems():
        if code == job_status:
            job_status_name = name
    try:
        with os.fdopen(
            os.open(
                "task_statistics.%s.%s" % (self.site, job_status_name),
                os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644,
            ),
            "a",
        ) as fd:
            fd.write("%d\n" % (self.job_id))
    except Exception as ex:
        self.logger.error(str(ex))
        # Swallow the exception - record_site is advisory only
    try:
        with os.fdopen(
            os.open("task_statistics.%s" % (job_status_name),
                    os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644),
            "a",
        ) as fd:
            fd.write("%d\n" % (self.job_id))
    except Exception as ex:
        self.logger.error(str(ex))
def initLogging(debug):
    global logFile
    try:
        logFilename = "openstack-setup.log"
        logFile = os.path.join(basedefs.DIR_LOG, logFilename)

        # Create the log file with restrictive permissions; puppet has a
        # habit of putting passwords in logs
        os.close(os.open(logFile, os.O_CREAT | os.O_EXCL, 0o600))

        hdlr = logging.FileHandler(filename=logFile, mode='w')
        if debug:
            level = logging.DEBUG
        else:
            level = logging.INFO

        fmts = '%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
        dfmt = '%Y-%m-%d %H:%M:%S'
        fmt = logging.Formatter(fmts, dfmt)
        hdlr.setFormatter(fmt)

        logging.root.handlers = []
        logging.root.addHandler(hdlr)
        logging.root.setLevel(level)
    except Exception:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_FAILED_INIT_LOGGER)
def runDebug(self, exc_info):
    if flags.can_touch_runtime_system("switch console") \
            and self._intf_tty_num != 1:
        iutil.vtActivate(1)

    os.open("/dev/console", os.O_RDWR)  # reclaim stdin
    os.dup2(0, 1)                       # reclaim stdout
    os.dup2(0, 2)                       # reclaim stderr
    #   ^
    #   |
    #   +------ dup2 is magic, I tells ya!

    # bring back the echo: set (rather than mask with) the ECHO bit
    import termios
    si = sys.stdin.fileno()
    attr = termios.tcgetattr(si)
    attr[3] = attr[3] | termios.ECHO
    termios.tcsetattr(si, termios.TCSADRAIN, attr)

    print("\nEntering debugger...")
    print("Use 'continue' command to quit the debugger and get back to "
          "the main window")
    import pdb
    pdb.post_mortem(exc_info.stack)

    if flags.can_touch_runtime_system("switch console") \
            and self._intf_tty_num != 1:
        iutil.vtActivate(self._intf_tty_num)
def daemonize():
    """\
    Standard daemonization of a process. Code is based on the ActiveState
    recipe at:
    http://code.activestate.com/recipes/278731/
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork() != 0:
            os._exit(0)
        os.setsid()
        if os.fork() != 0:
            # The intermediate process exits; the grandchild, which is not
            # a session leader, carries on.
            os._exit(0)
        os.umask(0)

        maxfd = get_maxfd()

        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:  # fd wasn't open to begin with (ignored)
                pass

        os.open(REDIRECT_TO, os.O_RDWR)  # becomes fd 0 (stdin)
        os.dup2(0, 1)  # stdout
        os.dup2(0, 2)  # stderr
def dump_command_to_json(self, command, retry=False):
    """
    Converts command to a json file and returns the file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # the public hostname is used
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn

    # Add the cache dir to make it visible for commands
    command["hostLevelParams"]["agentCacheDir"] = self.config.get('agent', 'cache_dir')

    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
        # These files are frequently created, that's why we don't
        # store them all, but only the latest one
        file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
        task_id = command['taskId']
        if 'clusterHostInfo' in command and command['clusterHostInfo'] and not retry:
            command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
        file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
        if command_type == ActionQueue.AUTO_EXECUTION_COMMAND:
            file_path = os.path.join(self.tmp_dir, "auto_command-{0}.json".format(task_id))

    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
        os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
        content = json.dumps(command, sort_keys=False, indent=4)
        f.write(content)
    return file_path
def is_direct_io_supported(dbname):
    with tempfile.NamedTemporaryFile(dir=dbname) as f:
        try:
            fd = os.open(f.name, os.O_DIRECT)
        except (OSError, AttributeError):
            # OSError: the filesystem rejects O_DIRECT;
            # AttributeError: the platform has no os.O_DIRECT at all
            return False
        os.close(fd)  # don't leak the probe descriptor
    return True
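# Usage sketch: probe once, then decide whether O_DIRECT can be added to the
# open flags for files in the database directory (the path is illustrative).
import os

open_flags = os.O_RDONLY
if is_direct_io_supported('/var/lib/mydb'):
    open_flags |= os.O_DIRECT  # only referenced when the probe succeeded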
def attach(self, part_id, path):
    """
    Attaches MicroSAN as Network Block Device
    """
    # Open NBD device
    nbd_fd = os.open(path, os.O_RDWR)

    # Set kernel parameters for NBD
    sysfs_name = '/sys/block/%s/queue/max_sectors_kb' % os.path.basename(path)
    sysfs = os.open(sysfs_name, os.O_RDWR)
    os.write(sysfs, '8')
    os.close(sysfs)

    # Resolve partition ip address
    part_ipaddr = self.name_res(part_id)[1]
    # Get partition information
    part_info = self._query_part(part_ipaddr)[0]
    # Parse partition information
    part_data = uSanProto.Partitions.parse_root_info(part_info[0])
    part_size = part_data[1]

    # Size of blocks (12 = 4KB block)
    block_size_power = 12
    block_size = part_size >> block_size_power

    # Set block sizes on device
    ioc = fcntl.ioctl(nbd_fd, NBD_SET_BLKSIZE, long(1 << block_size_power))
    if ioc < 0:
        raise IOError('nbd_fd cannot set NBD_SET_BLKSIZE')
    ioc = fcntl.ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, block_size)
    if ioc < 0:
        raise IOError('nbd_fd cannot set NBD_SET_SIZE_BLOCKS')
def write_env_var_to_conf_dir(var, value, conf_dir):
    env_jsonfile_path = os.path.join(conf_dir, 'environment.json')
    std_vars = ['DX_APISERVER_HOST', 'DX_APISERVER_PORT',
                'DX_APISERVER_PROTOCOL', 'DX_PROJECT_CONTEXT_ID',
                'DX_WORKSPACE_ID', 'DX_SECURITY_CONTEXT']
    if var in std_vars:
        try:
            with open(env_jsonfile_path) as fd:
                env_vars = json.load(fd)
        except Exception:
            env_vars = {}
        if value is None and var in env_vars:
            del env_vars[var]
        else:
            env_vars[var] = value
        # Make sure the file has 600 permissions
        try:
            os.remove(env_jsonfile_path)
        except Exception:
            pass
        with os.fdopen(os.open(env_jsonfile_path,
                               os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            json.dump(env_vars, fd, indent=4)
            fd.write("\n")
    else:
        # DX_CLI_WD, DX_USERNAME, DX_PROJECT_CONTEXT_NAME
        # Make sure the file has 600 permissions
        try:
            os.remove(os.path.join(conf_dir, var))
        except Exception:
            pass
        with os.fdopen(os.open(os.path.join(conf_dir, var),
                               os.O_CREAT | os.O_WRONLY, 0o600), 'w') as fd:
            fd.write(value)
    if not os.path.exists(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv'):
        with open(os.path.expanduser('~/.dnanexus_config/') + 'unsetenv', 'w') as fd:
            for var in std_vars:
                fd.write('unset ' + var + '\n')
def __executeInternal(self):
    """
    Executes the internal key creation using pyopenssl.
    """
    bits = int(self.__optsMap["--bits"][:1][0])
    keyout = self.__optsMap["--keyout"][:1][0]
    fp = os.fdopen(os.open(keyout, os.O_WRONLY | os.O_CREAT, 0o600), "w")

    print "generating key."
    crypto = self.__openssl.crypto
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, bits)

    print "write key to file."
    fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
    fp.flush()
    fp.close()
    print "done"

    if self.__name == "serverkey":
        csrout = self.__optsMap["--csrout"][:1][0]
        domains = self.__optsMap["--domain"]
        fp = open(csrout, "w")

        print "generating csr."
        req = crypto.X509Req()
        req.get_subject().CN = domains[0]
        req.set_pubkey(key)
        extensions = [crypto.X509Extension(
            "subjectAltName", False,
            ", ".join(["DNS:%s" % x for x in domains]))]
        req.add_extensions(extensions)

        print "writing csr."
        fp.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, req))
        fp.flush()
        fp.close()
def _fsync_files(filenames):
    """Call fsync() on a list of file names

    The filenames should be absolute paths already.
    """
    touched_directories = set()

    mode = os.O_RDONLY
    # Windows
    if hasattr(os, 'O_BINARY'):
        mode |= os.O_BINARY

    for filename in filenames:
        fd = os.open(filename, mode)
        os.fsync(fd)
        os.close(fd)
        touched_directories.add(os.path.dirname(filename))

    # Some OSes also require us to fsync the directory where we've
    # created files or subdirectories.
    if hasattr(os, 'O_DIRECTORY'):
        for dirname in touched_directories:
            fd = os.open(dirname, os.O_RDONLY | os.O_DIRECTORY)
            os.fsync(fd)
            os.close(fd)
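# A small usage sketch: write two files durably, then fsync both the files
# and their containing directory via _fsync_files (paths are illustrative).
paths = ['/tmp/demo-a.txt', '/tmp/demo-b.txt']
for p in paths:
    with open(p, 'w') as f:
        f.write('payload\n')
_fsync_files(paths)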
def __init__(self, config_dir):
    self.app = Flask(__name__)
    self.service_map = {}
    self.event_map = {}
    signal.signal(signal.SIGTERM, self.term_handler)
    signal.signal(signal.SIGQUIT, self.term_handler)
    signal.signal(signal.SIGINT, self.term_handler)
    with open(config_dir) as config_fd:
        self.api_config = yaml.load(config_fd)
    self.api_fifo_name = str(uuid.uuid4()) + '.fifo'
    self.api_fifo_path = os.path.join(ApiConstants.API_PIPE_DIR,
                                      self.api_fifo_name)
    os.mkfifo(self.api_fifo_path)
    try:
        self.api_fifo_fd = os.open(self.api_fifo_path, os.O_NONBLOCK)
        self.api_fifo_file = os.fdopen(self.api_fifo_fd)
    except (IOError, OSError) as exc:
        print("Unable to read the fifo file due to error {0}".format(exc))
        raise
    if not os.path.exists(self.api_config['moirai_input_fifo']):
        os.mkfifo(self.api_config['moirai_input_fifo'])
    try:
        self.moirai_fifo_fd = os.open(self.api_config['moirai_input_fifo'],
                                      os.O_WRONLY | os.O_NONBLOCK)
        self.moirai_fifo_file = os.fdopen(self.moirai_fifo_fd, 'w')
    except (IOError, OSError) as exc:
        print("Unable to connect to Moirai Server")
        self.moirai_fifo_fd = None
    self.setup_routes()
    self.command_id = 0
def fork_daemon():
    # Adapted from Chad J. Schroeder's recipe
    # @see http://code.activestate.com/recipes/278731/
    pid = os.fork()
    if pid == 0:
        # first child
        os.setsid()
        pid = os.fork()
        if pid == 0:
            # second child (the daemon)
            os.chdir('/')
            os.umask(0o22)
        else:
            # parent 2
            # print "Parent 2 (", pid, ") exiting"
            os._exit(0)
    else:
        # parent 1
        # print "Parent 1 (", pid, ") exiting"
        os._exit(0)

    # print "Child changing i/o"
    try:
        fd_inp = os.open(stdin, os.O_RDONLY)
        os.dup2(fd_inp, 0)
        fd_out = os.open(stdout, os.O_WRONLY | os.O_CREAT, 0o600)
        os.dup2(fd_out, 1)
        fd_err = os.open(stderr, os.O_WRONLY | os.O_CREAT, 0o600)
        os.dup2(fd_err, 2)
    except Exception as e:
        err = "Error with duping I/O, e=%s" % str(e)
        raise PDError(err)
def gather_keys():
    # setup
    pp = pprint.PrettyPrinter(indent=4)
    print('** OAuth Python Library Example **\n')
    client = FitbitOauthClient(CLIENT_KEY, CLIENT_SECRET)

    # get request token
    print('* Obtain a request token ...\n')
    token = client.fetch_request_token()
    print('RESPONSE')
    pp.pprint(token)
    print('')

    print('* Authorize the request token in your browser\n')
    # temporarily point stderr at /dev/null so the browser launch stays quiet
    stderr = os.dup(2)
    os.close(2)
    os.open(os.devnull, os.O_RDWR)
    webbrowser.open(client.authorize_token_url())
    os.dup2(stderr, 2)
    try:
        verifier = raw_input('Verifier: ')
    except NameError:
        # Python 3.x
        verifier = input('Verifier: ')

    # get access token
    print('\n* Obtain an access token ...\n')
    token = client.fetch_access_token(verifier)
    print('RESPONSE')
    pp.pprint(token)
    print('')
def __init__(self, path, flag='r', mode=0o666):
    """Create a dict backed by a SQLite DB at `path`.

    See `open` for an explanation of the parameters.
    """
    if flag not in ('c', 'n', 'w', 'r'):
        raise error('Invalid flag "%s"' % (flag,))

    # Default behavior is to create if the file does not already exist.
    # We tweak from this default behavior to accommodate the other flag options.
    self.readonly = flag == 'r'

    # Allow a :memory: sqlite3 path for testing purposes
    if path != ':memory:':
        # Need an absolute path to the db on the filesystem
        path = os.path.abspath(path)
        # r and w require the db to exist ahead of time
        if not os.path.exists(path):
            if flag in ('r', 'w'):
                raise error('DB does not exist at %s' % (path,))
            else:
                # Roundabout way of respecting mode, since it is unexposed by
                # sqlite3.connect: manually create the file (and close the
                # descriptor) before sqlite3 connects to it
                os.close(os.open(path, os.O_CREAT, mode))

    self.conn = sqlite3.connect(path)
    self.conn.text_factory = str
    self.conn.execute(_CREATE_TABLE)

    # The n option requires us to clear out existing data
    if flag == 'n':
        self.clear()
def daemonize(keepfd=None, chdir='/'):
    os.umask(0)
    if chdir:
        os.chdir(chdir)
    else:
        os.chdir('/')
    os.setgid(os.getgid())  # relinquish elevations
    os.setuid(os.getuid())  # relinquish elevations

    # Double fork to daemonize
    if os.fork() > 0:
        os._exit(0)  # Parent exits
    os.setsid()  # Obtain new process group
    if os.fork() > 0:
        os._exit(0)  # Parent exits

    # Signal handling
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Close open files
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 256
    for fd in reversed(range(maxfd)):
        try:
            if fd != keepfd:
                os.close(fd)
        except OSError:
            _, exc, _ = sys.exc_info()
            if exc.errno != errno.EBADF:
                raise

    # Redirect I/O to /dev/null
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
def SetupDaemonFDs(output_file, output_fd):
    """Sets up a daemon's file descriptors.

    @param output_file: if not None, the file to which to redirect stdout/stderr
    @param output_fd: if not None, the file descriptor for stdout/stderr

    """
    # check that at most one is defined
    assert [output_file, output_fd].count(None) >= 1

    # Open /dev/null (read-only, only for stdin)
    devnull_fd = os.open(os.devnull, os.O_RDONLY)

    output_close = True

    if output_fd is not None:
        output_close = False
    elif output_file is not None:
        # Open output file
        try:
            output_fd = os.open(output_file,
                                os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600)
        except EnvironmentError as err:
            raise Exception("Opening output file failed: %s" % err)
def update_file(path, updater, merger=lambda f: True):
    """update a file in a transaction-like manner"""
    fr = fw = None
    try:
        fd = os.open(path, os.O_CREAT | os.O_RDWR)
        try:
            fr = os.fdopen(fd, 'r+b')
        except:
            os.close(fd)
            raise
        fcntl.lockf(fr, fcntl.LOCK_EX)
        if not merger(fr):
            return

        tmpp = path + '.tmp.' + str(os.getpid())
        fd = os.open(tmpp, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        try:
            fw = os.fdopen(fd, 'wb', 0)
        except:
            os.close(fd)
            raise
        updater(fw)

        os.fsync(fd)
        os.rename(tmpp, path)
    finally:
        for fx in (fr, fw):
            if fx:
                fx.close()
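# A hedged usage sketch for update_file: rewrite a small state file under the
# exclusive lock. The merger inspects the locked original and returns False
# to skip the rewrite; the updater fills the temporary file that is then
# atomically renamed into place. Path and payload are illustrative.
PAYLOAD = b'new contents\n'

def _merger(fr):
    return fr.read() != PAYLOAD  # nothing to do if already current

def _updater(fw):
    fw.write(PAYLOAD)

update_file('/tmp/state.dat', _updater, _merger)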
def test00CropFail(self):
    # make the dirs
    cam = moduleUnderTest.cameras[0]
    indir = os.path.join(moduleUnderTest.root, "2013-07-01", cam.shortname)
    os.makedirs(os.path.join(indir, "hires"))

    # put a fragment of a test jpg in the indir
    tfn = "SampleImage.jpg"
    tfd = os.open(tfn, os.O_RDONLY | os.O_BINARY)
    buf = os.read(tfd, 8192)
    logging.info("test00CropFail(): buf size is %d" % len(buf))
    os.close(tfd)
    ifn = "12-00-01-12345.jpg"
    ifp = os.path.join(indir, ifn)
    infd = os.open(ifp, os.O_WRONLY | os.O_BINARY | os.O_CREAT)
    os.write(infd, buf)
    os.fsync(infd)
    os.close(infd)
    time.sleep(2)
    hfp = os.path.join(indir, "hires", ifn)

    # run processImage().
    # Since the mod time is recent, the file should stay in indir
    moduleUnderTest.processImage(indir, ifn, cam)
    assert os.path.exists(ifp) and not os.path.exists(hfp)

    # set the file's mod time back over an hour and run processImage().
    # This time the file should move to the hires dir
    os.utime(ifp, (int(time.time()), time.time() - 3602))
    moduleUnderTest.processImage(indir, ifn, cam)
    assert not os.path.exists(ifp) and os.path.exists(hfp)
def __init__(self, file, mode="r", offset=0, order=None): # headers self.data_header = struct.Struct("!I") self.header = struct.Struct("!QQ") # open file prot = mmap.PROT_READ | mmap.PROT_WRITE readonly = False is_new = False if mode == "r": prot, fd = mmap.PROT_READ, os.open(file, os.O_RDONLY) readonly = True elif mode == "w": fd = os.open(file, os.O_RDWR) elif mode == "c": is_new, fd = not os.path.lexists(file), os.open(file, os.O_RDWR | os.O_CREAT) elif mode == "n": is_new, fd = True, os.open(file, os.O_RDWR | os.O_TRUNC) else: raise ValueError("Unsupported open mode") # create mmap if is_new: os.write(fd, b"\x00" * self.header.size) stream = mmap.mmap(fd, 0, mmap.MAP_SHARED, prot) StreamSack.__init__(self, stream, offset, order, is_new, readonly=readonly)
def daemonize(logger):

    class DevNull(object):
        def __init__(self):
            self.fd = os.open("/dev/null", os.O_WRONLY)

        def write(self, *args, **kwargs):
            return 0

        def read(self, *args, **kwargs):
            return 0

        def fileno(self):
            return self.fd

        def close(self):
            os.close(self.fd)

    class ErrorLog:
        def __init__(self, obj):
            self.obj = obj

        def write(self, string):
            self.obj.log(logging.ERROR, string)

        def read(self, *args, **kwargs):
            return 0

        def close(self):
            pass

    if os.fork() != 0:
        # allow the child pid to instantiate the server class
        sleep(1)
        sys.exit(0)
    os.setsid()

    fd = os.open('/dev/null', os.O_RDONLY)
    if fd != 0:
        os.dup2(fd, 0)
        os.close(fd)

    null = DevNull()
    log = ErrorLog(logger)
    sys.stdout = null
    sys.stderr = log
    sys.stdin = null

    fd = os.open('/dev/null', os.O_WRONLY)
    #if fd != 1:
    #    os.dup2(fd, 1)
    os.dup2(sys.stdout.fileno(), 1)
    if fd != 2:
        os.dup2(fd, 2)
    if fd not in (1, 2):
        os.close(fd)
def power_read(self, read_path="/home/nvidia/project/test/pipeline.out"):
    rf = os.open(read_path, os.O_RDONLY)
    s = os.read(rf, 1024)
    os.close(rf)
    return float(s.decode())
        # pyg.draw.circle(surf, mycolor, (int(self.tx), int(self.ty)), 1, 0)
        # xcoord = (int(self.tx), int(self.ty))
        # xcoord = (400, 400)
        xcoord = (self.tx + self.sininc, self.tx)
        ycoord = (self.tx, self.ty - self.sininc)
        # pyg.draw.aaline(surf, mycolor, xcoord, ycoord, 0)
        # if (self.idnum > 0 and num <= (Tmaxelements - 1)):
        #     ycoord = (Tarray[num].tx, Tarray[num].ty)
        #     pyg.draw.aaline(surf, mycolor, xcoord, ycoord, 0)
        # pyg.draw.circle(surf, mycolor, (int(self.tx), int(self.ty)), 1, 0)


initParticles()

fd = os.open("/Users/TeZ/Documents/PY/mypy", os.O_RDONLY)
# Use the os.fchdir() method to change the working directory
os.fchdir(fd)


# /////////////////////////////////////////////
def check_events():
    ##### CHECK EVENTS / KEYBOARD KEY PRESSED #####
    for event in pyg.event.get():
        # https://www.pygame.org/docs/ref/key.html
        if event.type == pyg.KEYDOWN:
            if event.key == pyg.K_ESCAPE:
                print("eXit")
                quit()
            elif event.key == pyg.K_s:
                print("sAve")
#!/usr/bin/python
import os, sqlite3, time
from serial.serialutil import to_bytes

conn = sqlite3.connect('/home/pi/workspace/raspi_net/server/db/sys.db')
c = conn.cursor()
print "Opened database successfully"

cursor = c.execute("SELECT TEMP, HUMI FROM TEMP_HUMI ORDER BY ID DESC LIMIT 1")
row = cursor.fetchone()
command = str(round(row[0], 2)) + "_" + str(round(row[1], 2)) + "_" + str(
    time.strftime("%H", time.localtime()))
print "Operation done successfully"
conn.close()

print command + "\n"
d = to_bytes(command)
serial = os.open('/dev/ttyACM0', os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK)
os.write(serial, d)
def find(pattern):
    if os.path.exists(pattern):
        return os.open(pattern, os.O_RDWR)
    if os.path.exists("/dev/input/event%s" % pattern):
        return os.open("/dev/input/event%s" % pattern, os.O_RDWR)

    if ":" in pattern:
        pattern, idx = pattern.rsplit(":", 1)
        idx = int(idx)
    else:
        idx = 0

    candidates = glob.glob("/dev/input/event*")
    candidates.sort(key=humanize)

    successful_opens = 0
    for c in candidates:
        try:
            f = os.open(c, os.O_RDWR)
        except os.error:
            continue
        successful_opens += 1

        name = get_name(f)
        print(f"note: name is {repr(name)}")
        if name.find(pattern) != -1 or fnmatch.fnmatch(name, pattern):
            if idx == 0:
                return f
            else:
                idx -= 1
                continue

        try:
            phys = get_phys(f)
        except IOError:
            pass
        else:
            if phys.find(pattern) != -1 or fnmatch.fnmatch(phys, pattern):
                if idx == 0:
                    return f
                else:
                    idx -= 1
                    continue

        id = InputId.get(f)
        sid = "Bus=%s Vendor=%04x Product=%04x Version=%04x" % (
            id.bustype, id.vendor, id.product, id.version)
        if sid.find(pattern) != -1 or fnmatch.fnmatch(sid, pattern):
            if idx == 0:
                return f
            else:
                idx -= 1
                continue

        sid = "%04x:%04x" % (id.vendor, id.product)
        if sid.find(pattern) != -1 or fnmatch.fnmatch(sid, pattern):
            if idx == 0:
                return f
            else:
                idx -= 1
                continue

        os.close(f)

    if not successful_opens:
        raise LookupError("""\
No input devices could be opened. This usually indicates a misconfigured
system. Please read the section 'PERMISSIONS AND UDEV' in the hal_input
manpage""")
    raise LookupError(
        "No input device matching %r was found (%d devices checked)"
        % (pattern, successful_opens))
# close.py
import os

# O_WRONLY (not O_RDONLY) is required here: os.write on a descriptor opened
# read-only raises OSError
f = os.open('test.txt', os.O_WRONLY | os.O_CREAT)
os.write(f, str.encode('success!'))
os.close(f)
print('success!')
completer = Completer(cli_globals)
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')

# read history file
# purge history from previous raw_input calls, etc
readline.clear_history()
try:
    readline.read_history_file(DEFAULT_HISTFILE)
except EnvironmentError:
    # can't read or no such file
    try:
        # create a new file, rw only by user (O_CREAT is required here,
        # otherwise nothing is created and the open simply fails)
        os.close(os.open(DEFAULT_HISTFILE, os.O_WRONLY | os.O_CREAT, 0o600))
    except OSError:
        pass


# create interpreter and interact with user
class CustomInteractiveConsole(code.InteractiveConsole):
    def write(self, data):
        sys.stdout.write(data)


with token_renewer:
    # test connectivity
    try:
        plugins.installable()
def readline(self):
    try:
        line = self._buffer[self._bufindex]
    except IndexError:
        pass
    else:
        self._bufindex += 1
        self._lineno += 1
        self._filelineno += 1
        return line
    if not self._file:
        if not self._files:
            return ""
        self._filename = self._files[0]
        self._files = self._files[1:]
        self._filelineno = 0
        self._file = None
        self._isstdin = False
        self._backupfilename = 0
        if self._filename == '-':
            self._filename = '<stdin>'
            self._file = sys.stdin
            self._isstdin = True
        else:
            if self._inplace:
                self._backupfilename = (self._filename +
                                        (self._backup or ".bak"))
                try:
                    os.unlink(self._backupfilename)
                except OSError:
                    pass
                # The next few lines may raise OSError
                os.rename(self._filename, self._backupfilename)
                self._file = open(self._backupfilename, self._mode)
                try:
                    perm = os.fstat(self._file.fileno()).st_mode
                except OSError:
                    self._output = open(self._filename, "w")
                else:
                    mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
                    if hasattr(os, 'O_BINARY'):
                        mode |= os.O_BINARY
                    fd = os.open(self._filename, mode, perm)
                    self._output = os.fdopen(fd, "w")
                    try:
                        if hasattr(os, 'chmod'):
                            os.chmod(self._filename, perm)
                    except OSError:
                        pass
                self._savestdout = sys.stdout
                sys.stdout = self._output
            else:
                # This may raise OSError
                if self._openhook:
                    self._file = self._openhook(self._filename, self._mode)
                else:
                    self._file = open(self._filename, self._mode)
    self._buffer = self._file.readlines(self._bufsize)
    self._bufindex = 0
    if not self._buffer:
        self.nextfile()
    # Recursive call
    return self.readline()
def Authorize(self):
    '''
    Authorize the application with Facebook.
    '''
    global ACCESS_TOKEN
    ACCESS_TOKEN = None
    ENDPOINT = 'graph.facebook.com'
    REDIRECT_URI = 'http://127.0.0.1:8080/'

    '''
    Requirements for Facebook Authentication
    '''

    class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            global ACCESS_TOKEN
            code = urlparse.parse_qs(urlparse.urlparse(
                self.path).query).get('code')
            code = code[0] if code else None
            if code is None:
                self.wfile.write('Sorry, authentication failed.')
                raise AuthorizationError('Authorization Failed')
            response = get(
                '/oauth/access_token', {
                    'client_id': APP_ID,
                    'redirect_uri': REDIRECT_URI,
                    'client_secret': APP_SEC,
                    'code': code
                })
            ACCESS_TOKEN = urlparse.parse_qs(response)['access_token'][0]
            self.wfile.write('You have successfully logged in to facebook. '
                             'You can close this window now.')

        def log_message(self, format, *args):
            return

    def get_url(path, args=None):
        args = args or {}
        if ACCESS_TOKEN:
            args['access_token'] = ACCESS_TOKEN
        if 'access_token' in args or 'client_secret' in args:
            endpoint = "https://" + ENDPOINT
        else:
            endpoint = "http://" + ENDPOINT
        return endpoint + path + '?' + urllib.urlencode(args)

    def get(path, args):
        return urllib2.urlopen(get_url(path, args=args)).read()

    '''
    Steps to authenticate
    '''
    ui_print(colored('Authorizing Facebook Account...', 'yellow'))
    auth_url = get_url(
        '/oauth/authorize', {
            'client_id': APP_ID,
            'redirect_uri': REDIRECT_URI,
            'scope': 'publish_actions'
        })

    ''' Silence webbrowser messages '''
    savout = os.dup(1)
    os.close(1)
    os.open(os.devnull, os.O_RDWR)
    try:
        webbrowser.open(auth_url)
    finally:
        os.dup2(savout, 1)

    httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 8080), RequestHandler)
    while ACCESS_TOKEN is None:
        httpd.handle_request()

    ''' Update Config file with Token Keys '''
    cfg = ConfigParser.RawConfigParser()
    cfg.read(__cfgfile__)
    if not cfg.has_section('Facebook'):
        cfg.add_section('Facebook')
    cfg.set('Facebook', 'Access Token', hexlify(ACCESS_TOKEN))
    with open(__cfgfile__, 'wb') as configfile:
        cfg.write(configfile)
    if not self.VerifyCredentials():
        self.Reset()
        raise AuthorizationError('Authorization Failed')
def touch(fname):
    fd = os.open(fname, os.O_WRONLY | os.O_CREAT)
    os.close(fd)
def touch(fname, mode=0o666, dir_fd=None, **kwargs):
    flags = os.O_CREAT | os.O_APPEND
    with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f:
        os.utime(f.fileno() if os.utime in os.supports_fd else fname,
                 dir_fd=None if os.supports_fd else dir_fd, **kwargs)
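# Usage sketch for the dir_fd-aware touch above (assumes a POSIX platform
# where os.open and os.utime accept dir_fd; the file name is illustrative).
import os

d = os.open('/tmp', os.O_RDONLY)
try:
    touch('touched.txt', dir_fd=d)  # creates or updates /tmp/touched.txt
finally:
    os.close(d)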
def run_test(self):
    chain_height = self.nodes[0].getblockcount()
    assert_equal(chain_height, 200)

    self.log.debug("Mine a single block to get out of IBD")
    self.nodes[0].generate(1)
    self.sync_all()

    self.log.debug("Send 5 transactions from node2 (to its own address)")
    for i in range(5):
        self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(),
                                    Decimal("10"))
    node2_balance = self.nodes[2].getbalance()
    self.sync_all()

    self.log.debug(
        "Verify that node0 and node1 have 5 transactions in their mempools")
    assert_equal(len(self.nodes[0].getrawmempool()), 5)
    assert_equal(len(self.nodes[1].getrawmempool()), 5)

    self.log.debug(
        "Stop-start the nodes. Verify that node0 has the transactions in its "
        "mempool and node1 does not. Verify that node2 calculates its balance "
        "correctly after loading wallet transactions.")
    self.stop_nodes()
    # Give this one a head-start, so we can be "extra-sure" that it didn't
    # load anything later
    self.start_node(1)
    self.start_node(0)
    self.start_node(2)
    # Give bitcoind a second to reload the mempool
    wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
    wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
    # The others have loaded their mempool. If node_1 loaded anything, we'd
    # probably notice by now:
    assert_equal(len(self.nodes[1].getrawmempool()), 0)

    # Verify accounting of mempool transactions after restart is correct
    self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
    assert_equal(node2_balance, self.nodes[2].getbalance())

    self.log.debug(
        "Stop-start node0 with -persistmempool=0. Verify that it doesn't "
        "load its mempool.dat file.")
    self.stop_nodes()
    self.start_node(0, extra_args=["-persistmempool=0"])
    # Give bitcoind a second to reload the mempool
    time.sleep(1)
    assert_equal(len(self.nodes[0].getrawmempool()), 0)

    self.log.debug(
        "Stop-start node0. Verify that it has the transactions in its mempool.")
    self.stop_nodes()
    self.start_node(0)
    wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

    mempooldat0 = os.path.join(self.options.tmpdir, 'node0', 'regtest',
                               'mempool.dat')
    mempooldat1 = os.path.join(self.options.tmpdir, 'node1', 'regtest',
                               'mempool.dat')
    self.log.debug(
        "Remove the mempool.dat file. Verify that savemempool to disk via "
        "RPC re-creates it")
    os.remove(mempooldat0)
    self.nodes[0].savemempool()
    assert os.path.isfile(mempooldat0)

    self.log.debug(
        "Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 "
        "transactions")
    os.rename(mempooldat0, mempooldat1)
    self.stop_nodes()
    self.start_node(1, extra_args=[])
    wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

    self.log.debug(
        "Prevent bitcoind from writing mempool.dat to disk. Verify that "
        "`savemempool` fails")
    # to test the exception we are setting bad permissions on a tmp file
    # called mempool.dat.new which is an implementation detail that could
    # change and break this test
    mempooldotnew1 = mempooldat1 + '.new'
    with os.fdopen(os.open(mempooldotnew1, os.O_CREAT, 0o000), 'w'):
        pass
    assert_raises_rpc_error(-1, "Unable to dump mempool to disk",
                            self.nodes[1].savemempool)
    os.remove(mempooldotnew1)
assert sz < 1000000
buf = sys.stdin.read(sz)
assert len(buf) == sz
argv = buf.split('\0')
argv = [argv[0], 'mux', '--'] + argv

# stdin/stdout are supposedly connected to 'bup server' that the caller
# started for us (often on the other end of an ssh tunnel), so we don't want
# to misuse them.  Move them out of the way, then replace stdout with
# a pointer to stderr in case our subcommand wants to do something with it.
#
# It might be nice to do the same with stdin, but my experiments showed that
# ssh seems to make its child's stderr a readable-but-never-reads-anything
# socket.  They really should have used shutdown(SHUT_WR) on the other end
# of it, but probably didn't.  Anyway, it's too messy, so let's just make sure
# anyone reading from stdin is disappointed.
#
# (You can't just leave stdin/stdout "not open" by closing the file
# descriptors.  Then the next file that opens is automatically assigned 0 or 1,
# and people *trying* to read/write stdin/stdout get screwed.)
os.dup2(0, 3)
os.dup2(1, 4)
os.dup2(2, 1)
fd = os.open('/dev/null', os.O_RDONLY)
os.dup2(fd, 0)
os.close(fd)

os.environ['BUP_SERVER_REVERSE'] = helpers.hostname()
os.execvp(argv[0], argv)
sys.exit(99)
def __init__(self):
    # Open a pair of null files
    self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
    # Save the actual stdout (1) and stderr (2) file descriptors.
    self.save_fds = [os.dup(1), os.dup(2)]
def archive_wal(self, compressor, wal_info):
    """
    Archive a WAL segment and update the wal_info object

    :param compressor: the compressor for the file (if any)
    :param WalFileInfo wal_info: the WAL file that is being processed
    """
    src_file = wal_info.orig_filename
    src_dir = os.path.dirname(src_file)
    dst_file = wal_info.fullpath(self.server)
    tmp_file = dst_file + '.tmp'
    dst_dir = os.path.dirname(dst_file)

    error = None
    try:
        # Run the pre_archive_script if present.
        script = HookScriptRunner(self.backup_manager, 'archive_script', 'pre')
        script.env_from_wal_info(wal_info, src_file)
        script.run()

        # Run the pre_archive_retry_script if present.
        retry_script = RetryHookScriptRunner(self.backup_manager,
                                             'archive_retry_script', 'pre')
        retry_script.env_from_wal_info(wal_info, src_file)
        retry_script.run()

        # Check if destination already exists
        if os.path.exists(dst_file):
            src_uncompressed = src_file
            dst_uncompressed = dst_file
            dst_info = WalFileInfo.from_file(dst_file)
            try:
                comp_manager = self.backup_manager.compression_manager
                if dst_info.compression is not None:
                    dst_uncompressed = dst_file + '.uncompressed'
                    comp_manager.get_compressor(
                        compression=dst_info.compression).decompress(
                            dst_file, dst_uncompressed)
                if wal_info.compression:
                    src_uncompressed = src_file + '.uncompressed'
                    comp_manager.get_compressor(
                        compression=wal_info.compression).decompress(
                            src_file, src_uncompressed)
                # Directly compare the files.
                # When the files are identical
                # raise a MatchingDuplicateWalFile exception,
                # otherwise raise a DuplicateWalFile exception.
                if filecmp.cmp(dst_uncompressed, src_uncompressed):
                    raise MatchingDuplicateWalFile(wal_info)
                else:
                    raise DuplicateWalFile(wal_info)
            finally:
                if src_uncompressed != src_file:
                    os.unlink(src_uncompressed)
                if dst_uncompressed != dst_file:
                    os.unlink(dst_uncompressed)

        mkpath(dst_dir)
        # Compress the file only if not already compressed
        if compressor and not wal_info.compression:
            compressor.compress(src_file, tmp_file)

        # Perform the real filesystem operation with the xlogdb lock taken.
        # This makes the operation atomic from the xlogdb file POV
        with self.server.xlogdb('a') as fxlogdb:
            if compressor and not wal_info.compression:
                shutil.copystat(src_file, tmp_file)
                os.rename(tmp_file, dst_file)
                os.unlink(src_file)
                # Update wal_info
                stat = os.stat(dst_file)
                wal_info.size = stat.st_size
                wal_info.compression = compressor.compression
            else:
                # Try to atomically rename the file. If successful,
                # the renaming will be an atomic operation
                # (this is a POSIX requirement).
                try:
                    os.rename(src_file, dst_file)
                except OSError:
                    # Source and destination are probably on different
                    # filesystems
                    shutil.copy2(src_file, tmp_file)
                    os.rename(tmp_file, dst_file)
                    os.unlink(src_file)
            # At this point the original file has been removed
            wal_info.orig_filename = None

            # Execute fsync() on the archived WAL file
            file_fd = os.open(dst_file, os.O_RDONLY)
            os.fsync(file_fd)
            os.close(file_fd)
            # Execute fsync() on the archived WAL containing directory
            fsync_dir(dst_dir)
            # Execute fsync() also on the incoming directory
            fsync_dir(src_dir)
            # Update the information of the WAL archive with
            # the latest segment
            fxlogdb.write(wal_info.to_xlogdb_line())
            # flush and fsync for every line
            fxlogdb.flush()
            os.fsync(fxlogdb.fileno())
    except Exception as e:
        # In case of failure save the exception for the post scripts
        error = e
        raise
    # Ensure the execution of the post_archive_retry_script and
    # the post_archive_script
    finally:
        # Run the post_archive_retry_script if present.
        try:
            retry_script = RetryHookScriptRunner(self,
                                                 'archive_retry_script',
                                                 'post')
            retry_script.env_from_wal_info(wal_info, dst_file, error)
            retry_script.run()
        except AbortedRetryHookScript as e:
            # Ignore the ABORT_STOP as it is a post-hook operation
            _logger.warning(
                "Ignoring stop request after receiving "
                "abort (exit code %d) from post-archive "
                "retry hook script: %s",
                e.hook.exit_status, e.hook.script)
        # Run the post_archive_script if present.
        script = HookScriptRunner(self, 'archive_script', 'post', error)
        script.env_from_wal_info(wal_info, dst_file)
        script.run()
def test_access(self):
    f = os.open(test_support.TESTFN, os.O_CREAT | os.O_RDWR)
    os.close(f)
    self.assertTrue(os.access(test_support.TESTFN, os.W_OK))
def __call_nohup__(self, commander):
    (pid, self.r_path, self.w_path, self.stdin_path, self.stdout_path,
     self.stderr_path) = daemonize(self.basecmd.cmd_hash)
    if pid == 1:
        # Child process runs the commands
        commander._close_cmds_stdios(self)
        (self.pid, r_pipe, w_pipe, stdin_pipe, stdout_pipe,
         stderr_pipe) = create_process_cmd()
        if self.pid == 0:
            # Child process runs the commands
            self.msg = ms.Messenger(ms.StdIOWrapperIn(r_pipe),
                                    ms.StdIOWrapperOut(w_pipe))
            try:
                self.basecmd.results = self.obj(*self.basecmd.args,
                                                **self.basecmd.kargs)
            except Exception:
                err_msg = traceback.format_exc()
                self.msg.write_msg(remote_interface.CmdTraceBack(err_msg))
                sys.exit(-1)
            finally:
                self.msg.write_msg(self.basecmd.results)
                sys.exit(0)
        else:
            # The helper child process opens the communication pipes.
            # This process is able to handle problems with the connection
            # to the main parent process. It allows restarting an unchanged
            # child process.
            self.r_pipe = os.open(self.r_path, os.O_RDONLY)
            self.w_pipe = os.open(self.w_path, os.O_WRONLY)
            sys.stdout = os.fdopen(os.open(self.stdout_path, os.O_WRONLY),
                                   "w", 0)
            sys.stderr = os.fdopen(os.open(self.stderr_path, os.O_WRONLY),
                                   "w", 0)
            sys.stdin = os.fdopen(os.open(self.stdin_path, os.O_RDONLY),
                                  "r", 0)

            w_fds = [r_pipe, w_pipe, stdin_pipe, stdout_pipe, stderr_pipe]
            m_fds = [self.r_pipe, self.w_pipe, sys.stdin.fileno(),
                     sys.stdout.fileno(), sys.stderr.fileno()]

            p = select.poll()
            p.register(r_pipe)
            p.register(w_pipe)
            # p.register(stdin_pipe)
            p.register(stdout_pipe)
            p.register(stderr_pipe)
            p.register(self.r_pipe)
            # p.register(self.w_pipe)
            p.register(sys.stdin.fileno())
            # p.register(sys.stdout.fileno())
            # p.register(sys.stderr.fileno())

            io_map = {r_pipe: self.w_pipe,
                      self.r_pipe: w_pipe,
                      sys.stdin.fileno(): stdin_pipe,
                      stdout_pipe: sys.stdout.fileno(),
                      stderr_pipe: sys.stderr.fileno()}

            while 1:
                d = p.poll()
                w_ev = [x for x in d if x[0] in w_fds]
                m_ev = [x for x in d if x[0] in m_fds]
                w_hup, w_read, _ = sort_fds_event(w_ev)
                m_hup, m_read, _ = sort_fds_event(m_ev)
                if m_hup:
                    time.sleep(0.1)
                if w_hup:
                    # child process finished
                    for r in w_read:
                        data = os.read(r, 16384)
                        os.write(io_map[r], data)
                    break
                for r in w_read:
                    data = os.read(r, 16384)
                    os.write(io_map[r], data)
                for r in m_read:
                    data = os.read(r, 16384)
                    os.write(io_map[r], data)

            self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                    ms.StdIOWrapperOut(self.w_pipe))
            self.msg.write_msg(CmdFinish())
            exit(0)
    else:
        # The main process opens the communication named pipes.
        self.w_pipe = os.open(self.w_path, os.O_WRONLY)
        self.r_pipe = os.open(self.r_path, os.O_RDONLY)
        self.stdout_pipe = os.open(self.stdout_path, os.O_RDONLY)
        self.stderr_pipe = os.open(self.stderr_path, os.O_RDONLY)
        self.stdin_pipe = os.open(self.stdin_path, os.O_WRONLY)
        self.msg = ms.Messenger(ms.StdIOWrapperIn(self.r_pipe),
                                ms.StdIOWrapperOut(self.w_pipe))
#!/usr/bin/python3

import fcntl
import struct
import os
import time
from scapy.all import *

TUNSETIFF = 0x400454ca
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000

# Create the tun interface
tun = os.open("/dev/net/tun", os.O_RDWR)
ifr = struct.pack('16sH', b'group9_%d', IFF_TUN | IFF_NO_PI)
ifname_bytes = fcntl.ioctl(tun, TUNSETIFF, ifr)

# Get the interface name
ifname = ifname_bytes.decode('UTF-8')[:16].strip("\x00")

# Assign an IP address to the interface and bring it up
os.system("ip addr add 192.168.53.99/24 dev {}".format(ifname))
os.system("ip link set dev {} up".format(ifname))
print("Interface Name: {}".format(ifname))

while True:
    # Get a packet from the tun interface
    packet = os.read(tun, 2048)
    if True:
        ip = IP(packet)
        ip.show()
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                  newline=None, suffix=None, prefix=None,
                  dir=None, *, errors=None):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'errors' -- the errors argument to io.open (default None)
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface.  The file has no
    name, and will cease to exist when it is closed.
    """
    global _O_TMPFILE_WORKS

    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)

    flags = _bin_openflags
    if _O_TMPFILE_WORKS:
        try:
            flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
            fd = _os.open(dir, flags2, 0o600)
        except IsADirectoryError:
            # Linux kernel older than 3.11 ignores the O_TMPFILE flag:
            # O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
            # with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
            # directory cannot be open to write. Set flag to False to not
            # try again.
            _O_TMPFILE_WORKS = False
        except OSError:
            # The filesystem of the directory does not support O_TMPFILE.
            # For example, OSError(95, 'Operation not supported').
            #
            # On Linux kernel older than 3.11, trying to open a regular
            # file (or a symbolic link to a regular file) with O_TMPFILE
            # fails with NotADirectoryError, because O_TMPFILE is read as
            # O_DIRECTORY.
            pass
        else:
            try:
                return _io.open(fd, mode, buffering=buffering,
                                newline=newline, encoding=encoding,
                                errors=errors)
            except:
                _os.close(fd)
                raise

    # Fallback to _mkstemp_inner().
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
    try:
        _os.unlink(name)
        return _io.open(fd, mode, buffering=buffering,
                        newline=newline, encoding=encoding, errors=errors)
    except:
        _os.close(fd)
        raise
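# A minimal standalone sketch of the O_TMPFILE path the function above tries
# first: on a supporting Linux kernel and filesystem the file never receives
# a name, so nothing has to be unlinked when it is closed.
import os

if hasattr(os, 'O_TMPFILE'):
    fd = os.open('/tmp', os.O_TMPFILE | os.O_RDWR, 0o600)
    try:
        os.write(fd, b'scratch data')  # contents vanish with the descriptor
    finally:
        os.close(fd)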
from psana.parallelreader import ParallelReader
from psana.psexp.packet_footer import PacketFooter
import os, glob
from psana import dgram

tmp_dir = os.path.join(os.environ.get('TEST_XTC_DIR', os.getcwd()), '.tmp')
xtc_files = [
    os.path.join(tmp_dir, 'data-r0001-s00.xtc2'),
    os.path.join(tmp_dir, 'data-r0001-s01.xtc2')
]

fds = [os.open(xtc_file, os.O_RDONLY) for xtc_file in xtc_files]
configs = [dgram.Dgram(file_descriptor=fd) for fd in fds]

prl_reader = ParallelReader(fds)
block = prl_reader.get_block()
pf = PacketFooter(view=block)
views = pf.split_packets()

for i in range(len(views)):
    config, view = configs[i], views[i]
    d = dgram.Dgram(config=config, view=view)
    #assert getattr(d.epics[0].fast, 'HX2:DVD:GCC:01:PMON') == 41.0
    #assert getattr(d.epics[0].slow, 'XPP:GON:MMS:01:RBV') == 41.0
def handle(self):
    ubicacion = args.documentroot
    os.chdir(ubicacion)
    dic = {"ico": "image/vnd.svf", "txt": "text/plain", "jpg": "image/jpeg",
           "ppm": "image/x-portable-pixmap", "html": "text/html",
           "pdf": "application/pdf"}
    self.data = self.request.recv(1024)
    encabezado = self.data.decode().splitlines()[0]
    #print(encabezado)
    ruta_archivo = encabezado.split()[1]
    #print(ruta_archivo)
    ruta_archivo = ruta_archivo.split("/")
    print(ruta_archivo)
    respuesta = ""
    for x in range(len(ruta_archivo) - 1):
        if ruta_archivo[1 + x] == '' and len(ruta_archivo) == 2:
            # when the request path is "/"
            archivo = 'index.html'
            respuesta = "200 OK"
            break
        if os.path.isfile(ruta_archivo[1 + x]) == True:
            # the file exists
            archivo = ruta_archivo[1 + x]
            respuesta = "200 OK"
            #break
        if os.path.isfile(ruta_archivo[1 + x]) == False:
            # the file is missing
            archivo = '400error.html'
            respuesta = "404 Not Found"
        if os.path.isdir(ruta_archivo[1 + x]) == True:
            # the directory exists
            os.chdir(ubicacion + "/" + ruta_archivo[1 + x])
            archivo = "index.html"
            #break
        if ruta_archivo[1 + x] == '':
            # when the path ends in "/"
            archivo = 'index.html'
            respuesta = "200 OK"
        #if os.path.isdir(ruta_archivo[1+x]) == False:
        #    # the directory is missing
        #    archivo = '400error.html'
    extension = archivo.split('.')[1]
    #print("Extension:", extension)
    print(self.client_address)
    print(self.data)
    fd = os.open(archivo, os.O_RDONLY)
    body = os.read(fd, 50000)
    os.close(fd)
    header = bytearray("HTTP/1.1 " + respuesta +
                       "\r\nContent-type: " + dic[extension] +
                       "\r\nContent-length: " + str(len(body)) + "\r\n\r\n",
                       'utf8')
    self.request.sendall(header)
    self.request.sendall(body)
def _stat(fn):
    # Fallback used when os.stat is unavailable: merely opening the file is
    # enough to raise OSError if it does not exist.
    fd = _os.open(fn, _os.O_RDONLY)
    _os.close(fd)
def __init__(self): """ for surpressing std.out streams """ self._null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)] self._save_fds = [os.dup(1), os.dup(2)]
def _modify_s390(base, vmlinuz, initrd, append, error_messages):
    """
    Prepare files for an s390 install.

    Returns 1 on success, 0 on failure.
    """

    def run_or_record(cmd_args):
        # Run a command; on failure record the details in error_messages
        # and report it to the caller.
        exit_code, stdout, stderr = my_popen(cmd_args)
        if exit_code:
            error_messages['s390'] = {
                'command': string.join(cmd_args),
                'exit_code': exit_code,
                'stdout': stdout.read(),
                'stderr': stderr.read(),
            }
            return False
        return True

    # Build the parm file from kernel parameters in append.
    # There is a maximum of 32 parameters.
    parmfn = os.path.join(PREFIX, BOOT, RHN_KS_DIR, "user.parm")
    if os.path.exists(parmfn):
        os.unlink(parmfn)
    parmfd = os.open(parmfn, os.O_CREAT | os.O_WRONLY)
    parmdata = string.split(append, maxsplit=31)

    # Write kernel parameters to the file. Max 80 characters per line.
    line_position = 0
    for item in parmdata:
        if len(item) >= 80:
            continue
        elif (line_position + len(item)) >= 80:
            # Start a new line
            os.write(parmfd, "\n")
            line_position = 0
        os.write(parmfd, item)
        os.write(parmfd, " ")
        line_position += len(item) + 1
    os.write(parmfd, "\n")
    os.close(parmfd)

    # Load the z/VM unit record module. This was added to RHEL 5.2, so this
    # is a hard requirement. It also requires that the guest is running
    # under z/VM, not natively in an LPAR.
    if not run_or_record(["/sbin/modprobe", "vmur"]):
        return 0

    # Next, load vmcp, so we can run hypervisor commands directly.
    if not run_or_record(["/sbin/modprobe", "vmcp"]):
        return 0

    # Bring the virtual reader and virtual punch devices online.
    if not run_or_record(["/sbin/chccwdev", "-e", "000c", "000d"]):
        return 0

    # Clear the reader.
    if not run_or_record(["/sbin/vmur", "purge", "-f"]):
        return 0

    # Punch the kernel.
    if not run_or_record(["/sbin/vmur", "punch", "-r", "--name",
                          "KERNEL.IMG", vmlinuz]):
        return 0

    # Punch the parm file.
    if not run_or_record(["/sbin/vmur", "punch", "-t", "-r", "--name",
                          "USER.PARM", parmfn]):
        return 0

    # Punch the initrd.
    if not run_or_record(["/sbin/vmur", "punch", "-r", "--name",
                          "INITRD.IMG", initrd]):
        return 0

    # Set all files to "keep" on the reader.
    if not run_or_record(["/sbin/vmcp", "change", "reader", "all", "keep"]):
        return 0

    # Set the system to reboot from the virtual reader (address 000c).
os.open("/sys/firmware/reipl/reipl_type", os.O_WRONLY) os.write(reiplfd, "ccw") os.close(reiplfd) reiplfd_ccw = os.open("/sys/firmware/reipl/ccw/device", os.O_WRONLY) os.write(reiplfd_ccw, "0.0.000c") os.close(reiplfd_ccw) return 1
def __init__(self, comms, exp, run_no, run_src, **kwargs):
    """Parallel read requires that rank 0 does the file-system work.

    Configs and calib constants are sent to other ranks by MPI.

    Note that the destination callback only works with RunParallel.
    """
    super(RunParallel, self).__init__(exp, run_no,
                                      max_events=kwargs['max_events'],
                                      batch_size=kwargs['batch_size'],
                                      filter_callback=kwargs['filter_callback'],
                                      destination=kwargs['destination'],
                                      prom_man=kwargs['prom_man'])
    xtc_files, smd_files, other_files = run_src

    self.comms = comms
    psana_comm = comms.psana_comm  # TODO tjl and cpo to review

    rank = psana_comm.Get_rank()
    size = psana_comm.Get_size()

    g_ts = self.prom_man.get_metric("psana_timestamp")

    if rank == 0:
        # get Configure and BeginRun using SmdReader
        self.smd_fds = np.array(
            [os.open(smd_file, os.O_RDONLY) for smd_file in smd_files],
            dtype=np.int32)
        self.smdr_man = SmdReaderManager(self)
        self.configs = self.smdr_man.get_next_dgrams()
        g_ts.labels("first_event").set(time.time())
        self.beginruns = self.smdr_man.get_next_dgrams(configs=self.configs)
        self._get_runinfo()
        self.smd_dm = DgramManager(smd_files, configs=self.configs,
                                   run=self, fds=self.smd_fds)
        self.dm = DgramManager(xtc_files, configs=self.smd_dm.configs,
                               run=self)
        nbytes = np.array([memoryview(config).shape[0]
                           for config in self.configs], dtype='i')
        super()._set_configinfo()
        super()._set_calibconst()
        self.bcast_packets = {'calibconst': self.calibconst,
                              'expt': self.expt,
                              'runnum': self.runnum,
                              'timestamp': self.timestamp}
    else:
        self.smd_dm = None
        self.dm = None
        self.configs = None
        nbytes = np.empty(len(smd_files), dtype='i')
        self.bcast_packets = None

    # Send configs without pickling
    psana_comm.Bcast(nbytes, root=0)  # no. of bytes is required for mpich
    if rank > 0:
        self.configs = [np.empty(nbyte, dtype='b') for nbyte in nbytes]
    for i in range(len(self.configs)):
        psana_comm.Bcast([self.configs[i], nbytes[i], MPI.BYTE], root=0)

    # Send other small things using the lower-case bcast
    self.bcast_packets = psana_comm.bcast(self.bcast_packets, root=0)
    if rank > 0:
        self.configs = [dgram.Dgram(view=config, offset=0)
                        for config in self.configs]
        g_ts.labels("first_event").set(time.time())
        self.dm = DgramManager(xtc_files, configs=self.configs, run=self)
        # after creating a dgrammanager, we can set up the config info
        super()._set_configinfo()
        self.calibconst = self.bcast_packets['calibconst']
        self.expt = self.bcast_packets['expt']
        self.runnum = self.bcast_packets['runnum']
        self.timestamp = self.bcast_packets['timestamp']

    self.esm = EnvStoreManager(self.configs, 'epics', 'scan')
def _generate_rpm_data(type_id, rpm_filename, user_metadata=None):
    """
    For the given RPM, analyzes its metadata to generate the appropriate unit
    key and metadata fields, returning both to the caller.

    :param type_id: The type of the unit that is being generated
    :type  type_id: str
    :param rpm_filename: full path to the RPM to analyze
    :type  rpm_filename: str
    :param user_metadata: user supplied metadata about the unit. This is optional.
    :type  user_metadata: dict

    :return: tuple of unit key and unit metadata for the RPM
    :rtype: tuple
    """
    # Expected metadata fields:
    # "vendor", "description", "buildhost", "license", "vendor", "requires",
    # "provides", "relativepath", "filename"
    #
    # Expected unit key fields:
    # "name", "epoch", "version", "release", "arch", "checksumtype", "checksum"

    unit_key = dict()
    metadata = dict()

    # Read the RPM header attributes for use later
    ts = rpm.TransactionSet()
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
    fd = os.open(rpm_filename, os.O_RDONLY)
    try:
        headers = ts.hdrFromFdno(fd)
        os.close(fd)
    except rpm.error:
        # Raised if the headers cannot be read
        os.close(fd)
        raise

    # -- Unit Key -----------------------

    # Checksum
    if user_metadata and user_metadata.get('checksum_type'):
        user_checksum_type = user_metadata.get('checksum_type')
        user_checksum_type = verification.sanitize_checksum_type(user_checksum_type)
        unit_key['checksumtype'] = user_checksum_type
    else:
        unit_key['checksumtype'] = verification.TYPE_SHA256
    unit_key['checksum'] = _calculate_checksum(unit_key['checksumtype'],
                                               rpm_filename)

    # Name, Version, Release, Epoch
    for k in ['name', 'version', 'release', 'epoch']:
        unit_key[k] = headers[k]

    # Epoch munging
    if unit_key['epoch'] is None:
        unit_key['epoch'] = str(0)
    else:
        unit_key['epoch'] = str(unit_key['epoch'])

    # Arch
    if headers['sourcepackage']:
        if RPMTAG_NOSOURCE in headers.keys():
            unit_key['arch'] = 'nosrc'
        else:
            unit_key['arch'] = 'src'
    else:
        unit_key['arch'] = headers['arch']

    # -- Unit Metadata ------------------

    # construct the filename from metadata (BZ #1101168)
    if headers[rpm.RPMTAG_SOURCEPACKAGE]:
        if type_id != models.SRPM.TYPE:
            raise PulpCodedValidationException(error_code=error_codes.RPM1002)
        rpm_basefilename = "%s-%s-%s.src.rpm" % (headers['name'],
                                                 headers['version'],
                                                 headers['release'])
    else:
        if type_id != models.RPM.TYPE:
            raise PulpCodedValidationException(error_code=error_codes.RPM1003)
        rpm_basefilename = "%s-%s-%s.%s.rpm" % (headers['name'],
                                                headers['version'],
                                                headers['release'],
                                                headers['arch'])

    metadata['relativepath'] = rpm_basefilename
    metadata['filename'] = rpm_basefilename

    # This format is, and has always been, incorrect. As of the new yum
    # importer, the plugin will generate these from the XML snippet because
    # the API into RPM headers is atrocious. This is the end game for this
    # functionality anyway, moving all of that metadata derivation into the
    # plugin, so this is just a first step.
    # I'm leaving these in and commented to show how not to do it.
    # metadata['requires'] = [(r,) for r in headers['requires']]
    # metadata['provides'] = [(p,) for p in headers['provides']]

    metadata['buildhost'] = headers['buildhost']
    metadata['license'] = headers['license']
    metadata['vendor'] = headers['vendor']
    metadata['description'] = headers['description']
    metadata['build_time'] = headers[rpm.RPMTAG_BUILDTIME]
    # Use the mtime of the file to match what is in the generated xml from
    # rpm_parse.get_package_xml(..)
    file_stat = os.stat(rpm_filename)
    metadata['time'] = file_stat[stat.ST_MTIME]

    return unit_key, metadata
def create_tunnel(tun_name='tun%d', tun_mode=IFF_TUN):
    """Create a TUN/TAP interface and return its file descriptor and name.

    A '%d' in tun_name is expanded by the kernel to the next free index.
    Requires root (or CAP_NET_ADMIN) and the tun kernel module on Linux.
    """
    tun_fd = os.open("/dev/net/tun", os.O_RDWR)
    # TUNSETIFF takes a struct ifreq: a 16-byte interface name plus flags.
    ifn = ioctl(tun_fd, TUNSETIFF,
                struct.pack(b"16sH", tun_name.encode(), tun_mode))
    # The kernel echoes back the name it actually assigned, NUL-padded.
    tun_name = ifn[:16].decode().strip("\x00")
    return tun_fd, tun_name
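# A minimal usage sketch for create_tunnel (not from the original source):
# must run as root on Linux with the tun module loaded; the address shown
# is illustrative.
fd, name = create_tunnel()          # kernel picks the name, e.g. 'tun0'
print("created %s; now e.g.: ip addr add 10.0.0.1/24 dev %s && ip link set %s up"
      % (name, name, name))
packet = os.read(fd, 2048)          # blocks until a packet is routed to the device
print("read %d bytes" % len(packet))
os.close(fd)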
def run():
    global FALLBACK_METHOD
    global IFCONFIG_URL
    sys.stdout.write("[i] initial testing...\n")
    for candidate in IFCONFIG_CANDIDATES:
        result = retrieve(candidate)
        if re.search(r"\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z",
                     (result or "").strip()):
            IFCONFIG_URL = candidate
            break
    process = subprocess.Popen("curl -m %d -A \"%s\" %s" %
                               (TIMEOUT, USER_AGENT, IFCONFIG_URL),
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, _ = process.communicate()
    FALLBACK_METHOD = re.search(r"\d+\.\d+\.\d+\.\d+", stdout or "") is None
    sys.stdout.write("[i] retrieving list of proxies...\n")
    try:
        proxies = json.loads(
            retrieve(PROXY_LIST_URL, headers={"User-agent": USER_AGENT}))
    except:
        exit(
            "[!] something went wrong during the proxy list retrieval/parsing. Please check your network settings and try again"
        )
    random.shuffle(proxies)
    if options.country or options.anonymity or options.type:
        _ = []
        for proxy in proxies:
            if options.country and not re.search(options.country,
                                                 proxy["country"], re.I):
                continue
            if options.anonymity and not re.search(
                    options.anonymity, "%s (%s)" %
                (proxy["anonymity"],
                 ANONIMITY_LEVELS.get(proxy["anonymity"].lower(), "")), re.I):
                continue
            if options.type and not re.search(options.type, proxy["proto"],
                                              re.I):
                continue
            _.append(proxy)
        proxies = _
    if options.outputFile:
        # O_TRUNC discards any previous results; O_APPEND keeps the
        # concurrent worker writes landing at end-of-file
        handle = os.open(options.outputFile,
                         os.O_APPEND | os.O_CREAT | os.O_TRUNC | os.O_WRONLY)
        sys.stdout.write("[i] storing results to '%s'...\n" %
                         options.outputFile)
    else:
        handle = None
    queue = Queue.Queue()
    for proxy in proxies:
        queue.put(proxy)
    sys.stdout.write("[i] testing %d proxies (%d threads)...\n\n" %
                     (len(proxies) if not FALLBACK_METHOD else sum(
                         proxy["proto"] in ("http", "https")
                         for proxy in proxies), options.threads or THREADS))
    for _ in xrange(options.threads or THREADS):
        thread = threading.Thread(target=worker, args=[queue, handle])
        thread.daemon = True
        try:
            thread.start()
        except ThreadError as ex:
            sys.stderr.write(
                "[x] error occurred while starting new thread ('%s')\n" %
                ex.message)
            break
        threads.append(thread)
    try:
        alive = True
        while alive:
            alive = False
            for thread in threads:
                if thread.isAlive():
                    alive = True
                    time.sleep(0.1)
    except KeyboardInterrupt:
        sys.stderr.write("\r \n[!] Ctrl-C pressed\n")
    else:
        sys.stdout.write("\n[i] done\n")
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        if handle:
            os.close(handle)
        os._exit(0)
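# A self-contained sketch (Python 3, not from the original tool) of the
# queue-plus-daemon-worker pattern used above: N threads drain a shared
# queue and append results to a single fd opened with os.O_APPEND so
# concurrent writes land at end-of-file. All names here are illustrative.
import os
import queue
import threading

def worker(q, fd):
    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            break
        os.write(fd, ("checked %s\n" % item).encode())
        q.task_done()

q = queue.Queue()
for item in ("a", "b", "c"):
    q.put(item)
fd = os.open("/tmp/results.txt", os.O_APPEND | os.O_CREAT | os.O_WRONLY, 0o600)
threads = [threading.Thread(target=worker, args=(q, fd), daemon=True)
           for _ in range(4)]
for t in threads:
    t.start()
q.join()   # wait until every queued item has been processed
os.close(fd)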
def main():
    # Prevent duplicate runs
    pid_file = '/tmp/jqueuecons.pid'
    if os.path.isfile(pid_file):
        logger.info("The program is running")
        return 1
    else:
        signal(SIGTERM, lambda signum, stack_frame: exit(1))
        atexit.register(lambda: atexit_removepid(pid_file))
        fd = os.open(pid_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        os.write(fd, "%s\n" % os.getpid())
        os.close(fd)

    # Load the registered host information
    path = os.path.split(os.path.realpath(__file__))[0]
    host = evn = None
    try:
        with open(path + '/host', 'r') as db:
            host, evn = pickle.load(db)
    except (IOError, pickle.PickleError):
        pass
    if host is None:
        logger.info("please register this host")
        return 1

    # Check job status
    listinfo = JobsWeb.models.Alertjoblist.objects.raw(__sql_list)
    for t in listinfo:
        # Check dependencies: run only once every dependency has finished
        # (status == 2 is assumed to mean "done")
        if t.job_type == 3:
            ds = JobsWeb.models.Alertjoblist.objects.raw(__sql_deps % t.id)
            if any(d.status != 2 for d in ds):
                continue

        # Check the scheduled start time
        if t.job_first_start >= time.time():
            continue

        # Run the task in a child process
        logger.info("get a task: id %s" % t.id)
        pid = os.fork()
        if pid == 0:
            jobid = t.id
            #atexit.unregister()
            # Prevent duplicate runs of the same job
            pid_file = '/tmp/job_%s.pid' % jobid
            if os.path.isfile(pid_file):
                customlog("job id:%s is running" % jobid)
                return 1
            else:
                atexit.register(lambda: atexit_removepid(pid_file))
                fd = os.open(pid_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)
                os.write(fd, "%s\n" % str(os.getpid()))
                os.close(fd)

            # Get the lock and some information about the job
            mysqltarget = mysqlOpe()
            onejobnfo = mysqltarget.selectone(__sql_info % jobid)
            if onejobnfo[0] == 1:
                mysqltarget.update(__sql_job_update2 % (2, jobid))
            else:
                return 1

            customlog("this task id:%s will run" % jobid)
            customlog("command: " + t.command +
                      " | python JobsScript/logger.py id_%s" % jobid)
            logid = mysqltarget.insert(__sql_log_insert %
                                       (jobid, getLocalDate(), 0))
            mysqltarget.close()

            #status = os.system(t.command + " | python JobsScript/logger.py id_%s" % jobid) >> 8
            status = os.system(t.command) >> 8
            #status, output = commands.getstatusoutput(t.command + " | python JobsScript/logger.py")
            customlog("${PIPESTATUS[*]}: %s: %s" % (str(status), t.command))

            mysqltarget = mysqlOpe()
            job_runinterval = onejobnfo[1]
            runtime = job_runinterval * 60 + time.time()
            groupid = onejobnfo[2]
            errorid = onejobnfo[3]
            jobname = onejobnfo[4]
            if job_runinterval == 0:
                mysqltarget.update(__sql_job_update2 % (3, jobid))
            else:
                if status:
                    # The job failed
                    alertcontent = "job id:%s(%s) got an error, return code is %d" % (
                        t.id, jobname, status)
                    mysqltarget.update(__sql_log_update %
                                       (1, getLocalDate(), int(logid)))
                    mysqltarget.insert(__sql_alert_insert %
                                       (1, errorid, groupid, alertcontent, '',
                                        0, getLocalDate()))
                    customlog(alertcontent)
                else:
                    mysqltarget.update(__sql_log_update %
                                       (0, getLocalDate(), int(logid)))
                    customlog("job id:%s is done" % t.id)
                # Schedule the job's next run time
                mysqltarget.update(__sql_job_update1 % (1, runtime, jobid))
                #mysqltarget.update(__sql_job_update2 %(1, jobid))
            mysqltarget.close()
            # Done
            return 0
        elif pid > 0:
            # Reap any finished children without blocking
            #os.waitpid(pid,os.WNOHANG)
            os.waitpid(-1, os.WNOHANG)
        else:
            pass
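# A minimal, generic sketch of the pidfile pattern above (names are
# illustrative, not from the original script): O_CREAT | O_EXCL makes
# creation atomic, so two instances cannot both acquire the pidfile.
import atexit
import os

def acquire_pidfile(pid_file):
    try:
        fd = os.open(pid_file, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
    except OSError:
        return False  # another instance already holds the pidfile
    os.write(fd, ("%d\n" % os.getpid()).encode())
    os.close(fd)
    atexit.register(os.remove, pid_file)  # clean up on normal exit
    return True

if not acquire_pidfile('/tmp/example-daemon.pid'):
    raise SystemExit('already running')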
@contextlib.contextmanager
def _open_fd(*args, **kwargs):
    """Context manager around os.open() that always closes the descriptor.

    Requires `import contextlib`; without the decorator this generator is
    never advanced and the fd would never be opened or closed.
    """
    fd = os.open(*args, **kwargs)
    try:
        yield fd
    finally:
        os.close(fd)
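# Hypothetical usage of _open_fd (the path is illustrative): the descriptor
# is guaranteed to be closed even if the body raises.
with _open_fd('/etc/hostname', os.O_RDONLY) as fd:
    data = os.read(fd, 4096)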
def daemonize(enable_stdio_inheritance=False):
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)

        os.umask(0o22)

        # In both the following any file descriptors above stdin
        # stdout and stderr are left untouched. The inheritance
        # option simply allows one to have output go to a file
        # specified by way of shell redirection when not wanting
        # to use --error-log option.

        if not enable_stdio_inheritance:
            # Remap all of stdin, stdout and stderr on to
            # /dev/null. The expectation is that users have
            # specified the --error-log option.

            closerange(0, 3)

            fd_null = os.open(REDIRECT_TO, os.O_RDWR)
            if fd_null != 0:
                os.dup2(fd_null, 0)

            os.dup2(fd_null, 1)
            os.dup2(fd_null, 2)
        else:
            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            # Always redirect stdin to /dev/null as we would
            # never expect to need to read interactive input.

            if fd_null != 0:
                os.close(0)
                os.dup2(fd_null, 0)

            # If stdout and stderr are still connected to
            # their original file descriptors we check to see
            # if they are associated with terminal devices.
            # When they are we map them to /dev/null so that
            # they are still detached from any controlling
            # terminal properly. If not we preserve them as
            # they are.
            #
            # If stdout and stderr were not hooked up to the
            # original file descriptors, then all bets are
            # off and all we can really do is leave them as
            # they were.
            #
            # This will allow 'gunicorn ... > output.log 2>&1'
            # to work with stdout/stderr going to the file
            # as expected.
            #
            # Note that if using --error-log option, the log
            # file specified through shell redirection will
            # only be used up until the log file specified
            # by the option takes over. As it replaces stdout
            # and stderr at the file descriptor level, then
            # anything using stdout or stderr, including having
            # cached a reference to them, will still work.

            def redirect(stream, fd_expect):
                try:
                    fd = stream.fileno()
                    if fd == fd_expect and stream.isatty():
                        os.close(fd)
                        os.dup2(fd_null, fd)
                except AttributeError:
                    pass

            redirect(sys.stdout, 1)
            redirect(sys.stderr, 2)
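# A condensed, generic sketch of the same double-fork technique (not
# gunicorn's implementation): the first fork lets the parent exit, setsid
# detaches from the controlling terminal, the second fork ensures the
# daemon can never reacquire one, and stdio is pointed at os.devnull.
import os
import sys

def simple_daemonize():
    if os.fork():
        os._exit(0)          # parent returns to the shell
    os.setsid()              # become session leader, drop the terminal
    if os.fork():
        os._exit(0)          # session leader exits; child cannot get a tty
    os.umask(0o22)
    sys.stdout.flush()
    sys.stderr.flush()
    fd_null = os.open(os.devnull, os.O_RDWR)
    for fd in (0, 1, 2):     # remap stdin/stdout/stderr onto /dev/null
        os.dup2(fd_null, fd)
    if fd_null > 2:
        os.close(fd_null)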