def run_command(args):
    '''Call the command and get the stdout and stderr.

    This is like `subprocess.call()`, but it returns an object with the
    output of stdout, stderr, and the process return-code.

    .. py:attribute:: args

       Command suitable to pass to `subprocess.call()`, typically a list
       of the command and its arguments.

    :rtype: Object

       Includes attributes "stdout", "stderr", and "exitcode".
    '''
    # os.tmpfile() was deprecated in 2.6 and removed in Python 3;
    # tempfile.TemporaryFile() is the portable replacement.
    import tempfile

    class Return:
        '''Simple record holding the captured output and exit status.'''
        def __init__(self, stdout, stderr, exitcode):
            self.stdout = stdout
            self.stderr = stderr
            self.exitcode = exitcode

    with tempfile.TemporaryFile() as fpout:
        with tempfile.TemporaryFile() as fperr:
            exitcode = subprocess.call(args, stdout=fpout, stderr=fperr)
            # Rewind before reading back what the child wrote.
            fpout.seek(0)
            stdout = fpout.read()
            fperr.seek(0)
            stderr = fperr.read()
    return Return(stdout, stderr, exitcode)
def main():
    """Download (or read) the GeoLiteCity CSV archive and load its two
    CSV members (blocks, locations) into the local MySQL database."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    # NOTE(review): database credentials (including the password) are
    # hard-coded here -- consider moving them to configuration.
    db = MySQLdb.connect(host="localhost", user="******", passwd="Docent_2012",
                         db="docent_db", use_unicode = True, charset = 'utf8')
    cursor = db.cursor()
    #zippedfile = urllib2.urlopen('http://geolite.maxmind.com/download/geoip/'+
    #    'database/GeoLiteCity_CSV/GeoLiteCity_20120504.zip')
    zippedfile = open('GeoLiteCity_20120504.zip')
    tempFile = tempfile.TemporaryFile()
    tempFile.write(zippedfile.read())
    zippedfile.close()
    unzippedfile = zipfile.ZipFile(tempFile, "r")
    # First archive member is the blocks CSV, second is the locations CSV.
    blocks = tempfile.TemporaryFile()
    blocks.write(unzippedfile.read(unzippedfile.namelist()[0]))
    blocks.seek(0)
    location = tempfile.TemporaryFile()
    location.write(unzippedfile.read(unzippedfile.namelist()[1]))
    location.seek(0)
    unzippedfile.close()
    tempFile.close()
    # Skip the two header lines at the top of each CSV.
    for n in range(0, 2):
        blocks.readline()
        location.readline()
    insert_locations(db, cursor, location)
    insert_blocks(db, cursor, blocks)
def __init__(self, mixed_out_err = False):
    "Start capture of the Unix-level stdout and stderr."
    # If the platform lacks any of the os primitives we need, become a
    # no-op capturer (self.dummy = 1) rather than failing.
    # NOTE(review): os.tmpfile() no longer exists on Python 3, so there
    # this always takes the dummy path -- presumably intentional fallback.
    if (not hasattr(os, 'tmpfile') or not hasattr(os, 'dup')
        or not hasattr(os, 'dup2') or not hasattr(os, 'fdopen')):
        self.dummy = 1
    else:
        try:
            # Capture buffers; with mixed_out_err both streams share one file.
            self.tmpout = os.tmpfile()
            if mixed_out_err:
                self.tmperr = self.tmpout
            else:
                self.tmperr = os.tmpfile()
        except OSError:     # bah? on at least one Windows box
            self.dummy = 1
            return
        self.dummy = 0
        # make new stdout/stderr files if needed
        # Keep duplicates of the real fds 1/2 so they can be restored later.
        self.localoutfd = os.dup(1)
        self.localerrfd = os.dup(2)
        if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
            # Re-point the Python-level stream at the saved fd (line-buffered).
            self.saved_stdout = sys.stdout
            sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
        else:
            self.saved_stdout = None
        if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
            # stderr stays unbuffered (bufsize 0).
            self.saved_stderr = sys.stderr
            sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
        else:
            self.saved_stderr = None
        # Redirect the OS-level fds 1/2 into the capture files.
        os.dup2(self.tmpout.fileno(), 1)
        os.dup2(self.tmperr.fileno(), 2)
def run_collect_files(*args, **options):
    """
    Runs a series of commands like :func:`run_collect`, but returns open
    file objects for `stdout` and `stderr` instead of strings.

    Example:

    Iterate over the lines of ``ls -l | sort -r`` and print them out with
    line numbers::

        (f_stdout, f_stderr) = run_collect_files("ls -l", "sort -r")
        for (line_no, line) in enumerate(f_stdout):
            print ("%3d %s" % (line_no, line[:-1]))
    """
    # os.tmpfile() is deprecated/removed; tempfile.TemporaryFile replaces it.
    import tempfile
    # Create temporary files to collect output
    stdout_tmp = tempfile.TemporaryFile()
    stderr_tmp = tempfile.TemporaryFile()
    # Replace any existing "stdout" and "stderr" definitions
    options["stdout"] = stdout_tmp
    options["stderr"] = stderr_tmp
    # Use run_parallel to run *args as a single pipeline
    try:
        run_parallel(pipeline(*args), **options)
    except PipelineException as e:
        # Fold any captured stderr text into the exception message.
        msg = e.get_message()
        stderr_tmp.flush()
        stderr_tmp.seek(0)
        msg = "\n".join(filter(None, [msg, stderr_tmp.read().strip()]))
        raise PipelineException(msg, e.get_exit_statuses())
    # Rewind and return the files, as the docstring promises.
    stdout_tmp.seek(0)
    stderr_tmp.seek(0)
    return (stdout_tmp, stderr_tmp)
def _filter_message(self, msg):
    """Run *msg* through the external TMDA filter in a forked child.

    Returns (exitcode, msg, err) where err is any stderr text the
    child produced.
    """
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    self.log.trace()
    self._prepare_child()
    if msg.recipient == None or msg.sender == None:
        raise getmailConfigurationError('TMDA requires the message envelope'
                                        ' and therefore a multidrop retriever')
    # At least some security...
    if (os.geteuid() == 0 and not self.conf['allow_root_commands']
            and self.conf['user'] == None):
        raise getmailConfigurationError('refuse to invoke external'
                                        ' commands as root by default')
    # Capture files shared with the child via fd inheritance.
    stdout = tempfile.TemporaryFile()
    stderr = tempfile.TemporaryFile()
    childpid = os.fork()
    if not childpid:
        # Child
        self._filter_command(msg, stdout, stderr)
    self.log.debug('spawned child %d\n' % childpid)
    # Parent
    exitcode = self._wait_for_child(childpid)
    stderr.seek(0)
    err = stderr.read().strip()
    self.log.debug('command %s %d exited %d\n'
                   % (self.conf['command'], childpid, exitcode))
    return exitcode, msg, err
def __init__(self, mixed_out_err = False):
    "Start capture of the Unix-level stdout and stderr."
    # Fall back to a no-op capturer on win32 (os.tmpfile fails there, see
    # cpython issue #2232) or when the required os primitives are missing.
    # NOTE(review): os.tmpfile() no longer exists on Python 3, so there
    # this always takes the dummy path.
    if (sys.platform == 'win32' or  # os.tmpfile fails, cpython issue #2232
        not hasattr(os, 'tmpfile') or not hasattr(os, 'dup')
        or not hasattr(os, 'dup2') or not hasattr(os, 'fdopen')):
        self.dummy = 1
    else:
        self.dummy = 0
        # make new stdout/stderr files if needed
        # Duplicate the real fds 1/2 so they can be restored later.
        self.localoutfd = os.dup(1)
        self.localerrfd = os.dup(2)
        if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
            # Re-point the Python-level stdout at the saved fd (line-buffered).
            self.saved_stdout = sys.stdout
            sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
        else:
            self.saved_stdout = None
        if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
            # stderr stays unbuffered (bufsize 0).
            self.saved_stderr = sys.stderr
            sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
        else:
            self.saved_stderr = None
        # Capture buffers; with mixed_out_err both streams share one file.
        self.tmpout = os.tmpfile()
        if mixed_out_err:
            self.tmperr = self.tmpout
        else:
            self.tmperr = os.tmpfile()
        # Redirect OS-level fds 1/2 into the capture files.
        os.dup2(self.tmpout.fileno(), 1)
        os.dup2(self.tmperr.fileno(), 2)
def reset(self):
    """
    Reset this Task to a clean state prior to execution.
    """
    # os.tmpfile() was removed in Python 3; TemporaryFile gives the same
    # unnamed, auto-deleting file object.
    import tempfile
    self.stdout_file = tempfile.TemporaryFile()
    self.stderr_file = tempfile.TemporaryFile()
    self.started_at = None
    self.completed_at = None
    self.successful = None
def __init__(self, delay = 0.02):
    """Set up the worker process: output/done buffers, an input queue,
    and a globals dict seeded from sage.all."""
    multiprocessing.Process.__init__(self)
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    self.output = tempfile.TemporaryFile()
    self.done = tempfile.TemporaryFile()
    self.input = multiprocessing.Queue()
    self.delay = delay
    # Copy the whole sage.all namespace as the execution globals.
    # NOTE(review): iteritems() is Python-2-only -- presumably this file
    # targets Python 2 / Sage's Python.
    self.globs = {}
    for ky, val in sage.all.__dict__.iteritems():
        self.globs[ky] = val
def open2(a, b):
    """Mock replacement for an open() call: raises on the first call,
    returns a temp file containing '<h1>' on the second, and an empty
    temp file on the third."""
    # os.tmpfile() was removed in Python 3; TemporaryFile(mode='w+')
    # preserves the ability to write str data.
    import tempfile
    MockOs.open2_counter += 1
    if MockOs.open2_counter == 1:
        raise IOError
    if MockOs.open2_counter == 2:
        tmp = tempfile.TemporaryFile(mode='w+')
        tmp.write('<h1>')
        tmp.seek(0)
        return tmp
    if MockOs.open2_counter == 3:
        return tempfile.TemporaryFile(mode='w+')
def open1(a, b):
    """Mock replacement for an open() call: raises on the first call,
    returns a temp file containing a small server-page template on the
    second, and an empty temp file on the third."""
    # os.tmpfile() was removed in Python 3; TemporaryFile(mode='w+')
    # preserves the ability to write str data.
    import tempfile
    MockOs.open1_counter += 1
    if MockOs.open1_counter == 1:
        raise IOError
    if MockOs.open1_counter == 2:
        tmp = tempfile.TemporaryFile(mode='w+')
        tmp.write(('{0}<%if True:{1} Server.append(0){1}'
                   'else:{1} pass{1}end%>'.format(
                       server_page.NT_UTF_8_IDENTIFIER, chr(10))))
        tmp.seek(0)
        return tmp
    if MockOs.open1_counter == 3:
        return tempfile.TemporaryFile(mode='w+')
def test_tmpfile(self):
    # This test exercises os.tmpfile() itself, so it must keep calling
    # the deprecated API; it is skipped entirely where the API is absent.
    if not hasattr(os, "tmpfile"):
        return
    # As with test_tmpnam() below, the Windows implementation of tmpfile()
    # attempts to create a file in the root directory of the current drive.
    # On Vista and Server 2008, this test will always fail for normal users
    # as writing to the root directory requires elevated privileges. With
    # XP and below, the semantics of tmpfile() are the same, but the user
    # running the test is more likely to have administrative privileges on
    # their account already. If that's the case, then os.tmpfile() should
    # work. In order to make this test as useful as possible, rather than
    # trying to detect Windows versions or whether or not the user has the
    # right permissions, just try and create a file in the root directory
    # and see if it raises a 'Permission denied' OSError. If it does, then
    # test that a subsequent call to os.tmpfile() raises the same error. If
    # it doesn't, assume we're on XP or below and the user running the test
    # has administrative privileges, and proceed with the test as normal.
    with warnings.catch_warnings():
        # Silence the DeprecationWarning tmpfile() emits since 2.6.
        warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
        if sys.platform == 'win32':
            name = '\\python_test_os_test_tmpfile.txt'
            if os.path.exists(name):
                os.remove(name)
            try:
                fp = open(name, 'w')
            except IOError as first:
                # open() failed, assert tmpfile() fails in the same way.
                # Although open() raises an IOError and os.tmpfile() raises an
                # OSError(), 'args' will be (13, 'Permission denied') in both
                # cases.
                try:
                    fp = os.tmpfile()
                except OSError as second:
                    self.assertEqual(first.args, second.args)
                else:
                    self.fail("expected os.tmpfile() to raise OSError")
                return
            else:
                # open() worked, therefore, tmpfile() should work. Close our
                # dummy file and proceed with the test as normal.
                fp.close()
                os.remove(name)
        # Round-trip a small payload through the anonymous temp file.
        fp = os.tmpfile()
        fp.write("foobar")
        fp.seek(0, 0)
        s = fp.read()
        fp.close()
        self.assertTrue(s == "foobar")
def _deliver_message(self, msg, delivered_to, received):
    """Deliver *msg* to an mbox file via a forked child, optionally
    switching to the mbox owner's uid/gid first. Returns self."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    self.log.trace()
    uid = None
    gid = None
    # Get user & group of mbox file
    st_mbox = os.stat(self.conf['path'])
    user = self.conf['user']
    if os.name == 'posix':
        if user and uid_of_user(user) != os.geteuid():
            # Config specifies delivery as user other than current UID
            uid = uid_of_user(user)
            gid = gid_of_uid(uid)
        if uid == 0:
            raise getmailConfigurationError(
                'refuse to deliver mail as root'
            )
        if gid == 0:
            raise getmailConfigurationError(
                'refuse to deliver mail as GID 0'
            )
    self._prepare_child()
    # Capture files shared with the child via fd inheritance.
    stdout = tempfile.TemporaryFile()
    stderr = tempfile.TemporaryFile()
    childpid = os.fork()
    if not childpid:
        # Child
        self.__deliver_message_mbox(uid, gid, msg, delivered_to, received,
                                    stdout, stderr)
    self.log.debug('spawned child %d\n' % childpid)
    # Parent
    exitcode = self._wait_for_child(childpid)
    stdout.seek(0)
    stderr.seek(0)
    out = stdout.read().strip()
    err = stderr.read().strip()
    self.log.debug('mboxrd delivery process %d exited %d\n'
                   % (childpid, exitcode))
    if exitcode or err:
        raise getmailDeliveryError('mboxrd delivery %d error (%d, %s)'
                                   % (childpid, exitcode, err))
    if out:
        self.log.debug('mbox delivery: %s' % out)
    return self
def test_basic(self):
    """Resolve the manifest's dependencies, then download and extract
    each one into temporary directories."""
    manifest = 'test-dependencies.json'
    depot_location = '/space/2013'
    temp_location1 = tempfile.mkdtemp()
    temp_location2 = tempfile.mkdtemp()
    dr = DependenyResolver(manifest, depot_location)
    ds = dr.resolve()
    # (A stray os.tmpfile() call used to sit here under a "create a temp
    # folder" comment -- it created an unused temp *file* and leaked it,
    # and os.tmpfile() no longer exists in Python 3, so it was removed.)
    for d in ds:
        downloaded = dr.download(d, temp_location1)
        extracted = dr.extract(downloaded, temp_location2)
        ConsoleLogger.info(downloaded)
        ConsoleLogger.info(extracted)
def reset(self):
    """
    Reset this Task to a clean state prior to execution.
    """
    # os.tmpfile() was removed in Python 3; TemporaryFile gives the same
    # unnamed, auto-deleting file object.
    import tempfile
    self.stdout_file = tempfile.TemporaryFile()
    self.stderr_file = tempfile.TemporaryFile()
    self.stdout = ""
    self.stderr = ""
    self.started_at = None
    self.completed_at = None
    self.successful = None
    self.terminate_sent = False
    self.kill_sent = False
def fetch_episode_raw_media(episode_pk): from ..transcripts.models import TranscriptMedia from .models import Episode episode = Episode.objects.get(pk=episode_pk) transcript = episode.transcript media = TranscriptMedia( transcript=transcript, # file will be set below is_processed=False, is_full_length=True, ) # Stream MP3, then save it. uuid = uuid4().hex raw_path = '{transcript.id}_raw_{uuid}.mp3'.format(**locals()) response = requests.get(episode.media_url, stream=True) raw_file = os.tmpfile() for chunk in response.iter_content(262144): raw_file.write(chunk) raw_file.seek(0) print 'Saving {episode.media_url} to {raw_path}'.format(**locals()) media.file.save(raw_path, File(raw_file)) raw_file.close() # Now process it into our normalized format. media.create_processed_task()
def test_push_inits_no_stdout_spam():
    # git init has a tendency to spew to stdout, and that confuses
    # e.g. a git push
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    tmp = util.maketemp()
    cfg = RawConfigParser()
    cfg.add_section('gitosis')
    repositories = path.join(tmp, 'repositories')
    os.mkdir(repositories)
    cfg.set('gitosis', 'repositories', repositories)
    generated = path.join(tmp, 'generated')
    os.mkdir(generated)
    cfg.set('gitosis', 'generate-files-in', generated)
    cfg.add_section('group foo')
    cfg.set('group foo', 'members', 'jdoe')
    cfg.set('group foo', 'writable', 'foo')
    cfg.add_section('rsp')
    cfg.set('rsp', 'haveAccessURL', 'example.org')
    # Temporarily point fd 1 at a capture file so any stdout spam from
    # serve() is recorded instead of printed.
    old_stdout = os.dup(1)
    try:
        new_stdout = tempfile.TemporaryFile()
        os.dup2(new_stdout.fileno(), 1)
        serve.serve(
            cfg=cfg,
            user='******',
            command="git-receive-pack 'foo'",
        )
    finally:
        os.dup2(old_stdout, 1)
        os.close(old_stdout)
    new_stdout.seek(0)
    got = new_stdout.read()
    new_stdout.close()
    eq(got, '')
    eq(os.listdir(repositories), ['foo.git'])
    assert path.isfile(path.join(repositories, 'foo.git', 'HEAD'))
def patch_seq2ropath( patch_seq ):
    """Apply the patches in patch_seq, return single ropath"""
    # os.tmpfile() was removed in Python 3; tempfile.TemporaryFile on
    # POSIX likewise returns a real unnamed file object, which is what
    # librsync requires below.
    import tempfile
    first = patch_seq[0]
    assert first.difftype != "diff", patch_seq
    if not first.isreg():
        # No need to bother with data if not regular file
        assert len( patch_seq ) == 1, len( patch_seq )
        return first.get_ropath()
    current_file = first.open( "rb" )
    for delta_ropath in patch_seq[1:]:
        assert delta_ropath.difftype == "diff", delta_ropath.difftype
        if not isinstance( current_file, file ):
            # librsync needs true file -- copy the stream into one.
            tempfp = tempfile.TemporaryFile()
            misc.copyfileobj( current_file, tempfp )
            assert not current_file.close()
            tempfp.seek( 0 )
            current_file = tempfp
        current_file = librsync.PatchedFile( current_file,
                                             delta_ropath.open( "rb" ) )
    result = patch_seq[-1].get_ropath()
    result.setfileobj( current_file )
    return result
def replace_css(self, filename): tmp = os.tmpfile() rel_filename = os.path.join(settings.MEDIA_ROOT, filename) css = open(rel_filename, mode='r') for line in css: matches = [] for match in re.finditer(CSS_ASSET_PATTERN, line): try: grp = match.groupdict() absolute = grp['filename'].startswith('/') if absolute: asset_path = os.path.join(settings.MEDIA_ROOT, '.'+grp['filename']) else: asset_path = os.path.join(os.path.dirname(rel_filename), grp['filename']) asset = relpath(asset_path, settings.MEDIA_ROOT) asset_version = 'url(%s%s)' % (self.abs_versions[asset], grp.get('fragment') or '') matches.append((grp['url'], asset_version)) except KeyError: print "Failed to find %s in version map. Is it an absolute path?" % asset raise SystemExit(1) for old, new in matches: line = line.replace(old, new) tmp.write(line) tmp.flush() tmp.seek(0) css.close() css = open(rel_filename, mode='wb') shutil.copyfileobj(tmp, css)
def save(self):
    """Serialize self.json to <hits_dir>/<show>.hits.json, staging the
    output through a temp file first."""
    # os.tmpfile() was removed in Python 3; json.dump writes str, so use
    # text mode 'w+'.
    import tempfile
    filename = config.hits_dir + '/' + remove_extension(self.show.from_file) + '.hits.json'
    # we use a tmp file because json.dump may leave an unparsable file if an error occurs while encoding
    with tempfile.TemporaryFile(mode='w+') as tmp:
        json.dump(self.pre_encode(self.json), tmp, indent = 4,
                  sort_keys = True, cls = HitsEncoder)
        tmp.seek(0)
        open(filename, 'w').write(tmp.read())
def _filter_command(self, msg, stdout, stderr):
    """Child-process half of the TMDA filter: wire up stdin/stdout/stderr
    and exec the filter. Never returns; exits 127 on failure."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    try:
        # Write out message with native EOL convention
        msgfile = tempfile.TemporaryFile()
        msgfile.write(msg.flatten(True, True, include_from=True))
        msgfile.flush()
        os.fsync(msgfile.fileno())
        # Rewind
        msgfile.seek(0)
        # Set stdin to read from this file
        os.dup2(msgfile.fileno(), 0)
        # Set stdout and stderr to write to files
        os.dup2(stdout.fileno(), 1)
        os.dup2(stderr.fileno(), 2)
        change_usergroup(self.log, self.conf['user'], self.conf['group'])
        args = [self.conf['path'], self.conf['path']]
        # Set environment for TMDA
        os.environ['SENDER'] = msg.sender
        os.environ['RECIPIENT'] = msg.recipient
        os.environ['EXT'] = self.conf['conf-break'].join('@'.join(
            msg.recipient.split('@')[:-1]).split(
            self.conf['conf-break'])[1:])
        self.log.trace('SENDER="%(SENDER)s",RECIPIENT="%(RECIPIENT)s"'
                       ',EXT="%(EXT)s"' % os.environ)
        self.log.debug('about to execl() with args %s\n' % str(args))
        os.execl(*args)
    except StandardError as o:
        # Child process; any error must cause us to exit nonzero for parent
        # to detect it
        self.log.critical('exec of filter %s failed (%s)'
                          % (self.conf['command'], o))
        os._exit(127)
def __call__(self, code=""):
    """Write *code* to a temp file and hand its OS handle to the MceIr
    DLL for playback."""
    # os.tmpfile() was removed in Python 3; mode='w+' keeps str writes
    # working as before.
    import tempfile
    tmpFile = tempfile.TemporaryFile(mode='w+')
    tmpFile.write(code)
    tmpFile.seek(0)
    self.plugin.msgThread.dll.MceIrPlaybackFromFile(
        get_osfhandle(tmpFile.fileno())
    )
def srcCutdir(root_dir, dir_list, keep_module_names=[]):
    """Performs source-code stripping of C and header files"""
    # os.tmpfile() was removed in Python 3; mode='w+' keeps str writes
    # working as before.
    import tempfile
    assert type(root_dir) == types.StringType
    assert type(dir_list) == types.ListType or type(dir_list) == types.TupleType
    for directory in dir_list:
        path = os.path.join(root_dir, directory)
        if not os.path.isdir(path):
            dbgPrint("\t%s is not a valid directory\n" % path, dbg_msg.WARNING)
            continue
        for root, dirs, files in os.walk(path):
            # 'fname' (not 'file') so the builtin isn't shadowed.
            for fname in files:
                if not (fname.endswith(".c") or fname.endswith(".h")):
                    continue
                filepath = os.path.realpath(os.path.join(root, fname))
                dbgPrint("\tStripping source file %s...\n" % filepath,
                         dbg_msg.DEBUG)
                tmp = tempfile.TemporaryFile(mode='w+')
                f = open(filepath)
                tmp.writelines(cutCode(f, keep_module_names))
                # Close the read handle before reopening for write
                # (the original leaked it).
                f.close()
                f = open(filepath, "w")
                tmp.seek(0)
                f.writelines(tmp)
                tmp.close()
                f.close()
def test_push_inits_no_stdout_spam():
    # git init has a tendency to spew to stdout, and that confuses
    # e.g. a git push
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    tmp = util.maketemp()
    cfg = RawConfigParser()
    cfg.add_section("gitosis")
    repositories = os.path.join(tmp, "repositories")
    os.mkdir(repositories)
    cfg.set("gitosis", "repositories", repositories)
    generated = os.path.join(tmp, "generated")
    os.mkdir(generated)
    cfg.set("gitosis", "generate-files-in", generated)
    cfg.add_section("group foo")
    cfg.set("group foo", "members", "jdoe")
    cfg.set("group foo", "writable", "foo")
    # Temporarily point fd 1 at a capture file so any stdout spam from
    # serve() is recorded instead of printed.
    old_stdout = os.dup(1)
    try:
        new_stdout = tempfile.TemporaryFile()
        os.dup2(new_stdout.fileno(), 1)
        serve.serve(cfg=cfg, user="******", command="git-receive-pack 'foo'")
    finally:
        os.dup2(old_stdout, 1)
        os.close(old_stdout)
    new_stdout.seek(0)
    got = new_stdout.read()
    new_stdout.close()
    eq(got, "")
    eq(os.listdir(repositories), ["foo.git"])
    assert os.path.isfile(os.path.join(repositories, "foo.git", "HEAD"))
def open_output(self, path):
    """Register a buffered output file for *path* and return a writable
    handle to it.

    The backing temp file is kept in self.output_files; the returned
    object wraps a dup'd fd so the caller may close it freely.
    """
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    # TODO better checks..
    assert not path.startswith('/')
    assert not path.startswith('.')
    assert '/.' not in path
    fullpath = os.path.join(self.dst_root, path)
    if path != self.path:
        # existing file checking is skipped for actions processing
        # the file itself
        if os.path.exists(fullpath):
            raise RuntimeError(
                'Output file exists already: %r' % fullpath,
            )
    if path in self.output_files:
        raise RuntimeError(
            'Output file opened already: %r' % fullpath,
        )
    f = tempfile.TemporaryFile()
    self.output_files[path] = f
    # dup the fd so the action freely .close()
    fd = os.dup(f.fileno())
    f2 = os.fdopen(fd, 'w')
    return f2
def __init__(self, file_, package):
    """Unpack an advene2 zip package into a temp directory and record
    the path of its content.xml."""
    # The bare tmpfile() (os.tmpfile) was removed in Python 3;
    # tempfile.TemporaryFile is the replacement.
    import tempfile
    self.dir = d = mkdtemp(prefix="advene2_zip_")
    # Best-effort cleanup of the extraction dir at interpreter exit.
    atexit.register(rmtree, d, True)
    if hasattr(file_, "seek"):
        g = None
        z = ZipFile(file_, "r")
    else:
        # ZipFile requires seekable file, dump it in tmpfile
        g = tempfile.TemporaryFile()
        g.write(file_.read())
        g.seek(0)
        z = ZipFile(g, "r")
    names = z.namelist()
    for zname in names:
        seq = zname.split("/")
        dirname = recursive_mkdir(d, seq[:-1])
        if seq[-1]:
            fname = path.join(dirname, seq[-1])
            h = open(fname, "w")
            h.write(z.read(zname))
            h.close()
    z.close()
    if g is not None:
        g.close()
    self.content = path.join(d, "content.xml")
    self.package = package
def getFromTemp(self, audio, msg, wav):
    """Load raw *audio* data into *wav* via a temp file and cache the
    wav object under *msg*."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    fp = tempfile.TemporaryFile()
    fp.write(audio)
    fp.seek(0)
    wav.fpopen("tmp.wav", AUDIO_READ, fp)
    self.audio[msg] = wav
def off_test_exportfiles(self):
    """
    See whether we can import and then export some files.
    The procedure is as follows:
    1. Open a vCard file
    2. Import the data
    3. Export the data to a string
    4. Write it back to a file
    5. Read it back in
    6. Compare with the original data read in
    7. Compare with to original read contents
    """
    # os.tmpfile() was removed in Python 3; mode='w+' keeps str writes
    # working as before.
    import tempfile
    for filename in self.testfiles:
        logging.debug('Importing file %s', filename)
        f = open(filename)
        filedata1 = f.read()
        c1 = Contact.importFrom('vCard', filedata1)
        c1.commit()
        f2 = tempfile.TemporaryFile(mode='w+')
        f2.write(c1.exportTo('vCard'))
        f2.seek(0)
        filedata2 = f2.read()
        c2 = Contact.importFrom('vCard', filedata2)
        c2.commit()
        self.compare_contacts(c1, c2)
        self.assertEqual(filedata1, filedata2)
def get_flash_upload(web):
    """
    Reformat data coming in from Flash FileReference

    FileReference has silly boundary problems that create bad timeout errors
    As standard multipart form data is present, this also works fine with
    standard HTML forms

    http://www.mail-archive.com/[email protected]/msg04505.html
    """
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    buf = tempfile.TemporaryFile()
    contentLength = int(web.ctx.env["CONTENT_LENGTH"])
    if contentLength <= 0:
        raise AssertionError("Invalid content length")
    wsgiInput = web.ctx.env["wsgi.input"]
    # Copy the request body into the temp file in bounded chunks.
    while contentLength > 0:
        chunk = 1024
        if contentLength < chunk:
            chunk = contentLength
        contentLength -= chunk
        buf.write(wsgiInput.read(chunk))
    buf.seek(0)
    # Substitute the buffered body so web.input() re-parses it cleanly.
    web.ctx.env["wsgi.input"] = buf
    input = web.input()
    buf.close()
    return input
def _filter_command(self, msg, msginfo, stdout, stderr):
    """Child-process half of the external filter: wire up
    stdin/stdout/stderr, expand argument templates from *msginfo*, and
    exec the filter. Never returns; exits 127 on failure."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    try:
        # Write out message with native EOL convention
        msgfile = tempfile.TemporaryFile()
        msgfile.write(msg.flatten(False, False,
                                  include_from=self.conf['unixfrom']))
        msgfile.flush()
        os.fsync(msgfile.fileno())
        # Rewind
        msgfile.seek(0)
        # Set stdin to read from this file
        os.dup2(msgfile.fileno(), 0)
        # Set stdout and stderr to write to files
        os.dup2(stdout.fileno(), 1)
        os.dup2(stderr.fileno(), 2)
        change_usergroup(self.log, self.conf['user'], self.conf['group'])
        args = [self.conf['path'], self.conf['path']]
        for arg in self.conf['arguments']:
            arg = expand_user_vars(arg)
            # Substitute %(key) placeholders with message metadata.
            for (key, value) in msginfo.items():
                arg = arg.replace('%%(%s)' % key, value)
            args.append(arg)
        self.log.debug('about to execl() with args %s\n' % str(args))
        os.execl(*args)
    except StandardError as o:
        # Child process; any error must cause us to exit nonzero for parent
        # to detect it
        self.log.critical('exec of filter %s failed (%s)'
                          % (self.conf['command'], o))
        os._exit(127)
def runVncViewer(domid, do_autopass, do_daemonize=False): xs = OurXenstoreConnection() d = '/local/domain/%d/' % domid vnc_port = xs.read_eventually(d + 'console/vnc-port') vfb_backend = xs.read_maybe(d + 'device/vfb/0/backend') vnc_listen = None vnc_password = None vnc_password_tmpfile = None cmdl = ['vncviewer'] if vfb_backend is not None: vnc_listen = xs.read_maybe(vfb_backend + '/vnclisten') if do_autopass: vnc_password = xs.read_maybe(vfb_backend + '/vncpasswd') if vnc_password is not None: cmdl.append('-autopass') vnc_password_tmpfile = os.tmpfile() print >>vnc_password_tmpfile, vnc_password vnc_password_tmpfile.seek(0) vnc_password_tmpfile.flush() if vnc_listen is None: vnc_listen = 'localhost' cmdl.append('%s:%d' % (vnc_listen, int(vnc_port) - 5900)) if do_daemonize: pid = utils.daemonize('vncviewer', cmdl, vnc_password_tmpfile) if pid == 0: puts >>sys.stderr, 'failed to invoke vncviewer' os._exit(-1) else: print 'invoking ', ' '.join(cmdl) if vnc_password_tmpfile is not None: os.dup2(vnc_password_tmpfile.fileno(), 0) os.execvp('vncviewer', cmdl)
def test_dump_load(self):
    # Get a temporary file to dump a queue to that file
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    count = 100
    items = [i for i in range(count)]
    self.q.extend(zip(items, items))
    self.assertEquals(self.q.elements(), items)
    with tempfile.TemporaryFile() as f:
        self.q.dump(f)
        # Now, assert that it is empty
        self.assertEquals(len(self.q), 0)
        # Now, try to load it back in
        f.seek(0)
        self.q.load(f)
        self.assertEquals(len(self.q), count)
        self.assertEquals(self.q.elements(), items)
        # Now clean up after myself
        f.truncate()
        self.q.clear()
def load_script(path, module_name):
    """Load a Python source file as *module_name*, staging file-like
    objects through a real temp file because imp.load_source requires
    one."""
    # os.tmpfile() was removed in Python 3; on POSIX
    # tempfile.TemporaryFile likewise yields a real file object.
    import tempfile
    fp, fname = tools.file_open(path, pathinfo=True)
    fp2 = None
    if not isinstance(fp, file):  # pylint: disable=file-builtin
        # imp.load_source need a real file object, so we create
        # one from the file-like object we get from file_open
        fp2 = tempfile.TemporaryFile()
        fp2.write(fp.read())
        fp2.seek(0)
    try:
        return imp.load_source(module_name, fname, fp2 or fp)
    finally:
        if fp:
            fp.close()
        if fp2:
            fp2.close()
def test_dump_load(self):
    # Get a temporary file to dump a queue to that file
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    count = 100
    self.stack.extend(range(count))
    self.assertEquals(self.stack.elements(),
                      [count - i - 1 for i in range(count)])
    with tempfile.TemporaryFile() as f:
        self.stack.dump(f)
        # Now, assert that it is empty
        self.assertEquals(len(self.stack), 0)
        # Now, try to load it back in
        f.seek(0)
        self.stack.load(f)
        self.assertEquals(len(self.stack), count)
        self.assertEquals(self.stack.elements(),
                          [count - i - 1 for i in range(count)])
        # Now clean up after myself
        f.truncate()
        self.stack.clear()
def getScanFiles(expt, scans): """Retrieves the data files for the given scan names or types from the given experiment.""" args = { 'URI': '/REST/projects/%(project)s/subjects/%(subject_label)s/experiments/%(label)s' % expt, 'scans': scans } if verbose: print 'Getting %(URI)s: %(scans)s' % args zfh = os.tmpfile() try: get_to_fh('%(URI)s/scans/%(scans)s/files?format=zip' % args, zfh) zfh.seek(0, os.SEEK_SET) z.ZipFile(zfh, 'r').extractall(cache) finally: zfh.close()
def run_updates(staging, cores):
    """ run update.php on each wiki found in dblist """
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    procs = []
    with open(staging, "r") as dblist:
        for db in dblist:
            db = db.strip()
            # Each child's combined output goes to its own temp file.
            f = tempfile.TemporaryFile()
            cmd = "/usr/local/bin/mwscript update.php --wiki=%s --quick" % db
            p = subprocess.Popen(cmd, stdout=f, stderr=f, shell=True)
            procs.append((p, f, cmd))
            # Run at most 'cores' updates at a time.
            if (len(procs) >= cores):
                do_wait(procs)
                procs = []
    # catches odd cases where dblist file is smaller than batch size
    if (len(procs) > 0):
        do_wait(procs)
def __parse(self): recvbuf = '' while '\n' not in recvbuf: yield fiber.RECV(self.client, Params.TIMEOUT) recvbuf += self.recv() header, recvbuf = recvbuf.split('\n', 1) print 'Client sends', header.rstrip() self.__parse_header(header) args = {} while True: while '\n' not in recvbuf: yield fiber.RECV(self.client, Params.TIMEOUT) recvbuf += self.recv() line, recvbuf = recvbuf.split('\n', 1) if ':' in line: if Params.VERBOSE > 1: print '>', line.rstrip() key, value = line.split(':', 1) key = key.title() assert key not in args, 'duplicate key: %s' % key args[key] = value.strip() elif line and line != '\r': print 'Ignored header line: %r' % line.rstrip() else: break self.__parse_args(args) if self.size: if Params.VERBOSE: print 'Opening temporary file for POST upload' self.body = os.tmpfile() self.body.write(recvbuf) while self.body.tell() < self.size: yield fiber.RECV(self.client, Params.TIMEOUT) self.body.write(self.recv()) assert self.body.tell( ) == self.size, 'message body exceeds content-length' else: assert not recvbuf, 'client sends junk data'
def renderExamplesNode(node, tree, opt, var):
    """Assemble the examples index page for *node* from the node-page
    template plus one examples-box per leaf, then render it and recurse
    into the leaves."""
    # os.tmpfile() was removed in Python 3; mode='w+' keeps str writes
    # working as before.
    import tempfile
    object = {'source': None, 'type': 'parsed'}
    object['target'] = os.path.join(tree['root'], node['dir'], 'index.html')
    ftmp = tempfile.TemporaryFile(mode='w+')
    # Set the "examplename" variable so that we get the page title correct
    ftmp.write("<<SET examplename: %s>>\n" % node['name'].replace("_", " "))
    fin = open(os.path.join(options['includedir'], tree['nodepage']), 'r')
    # Copy the template up to the <<EXAMPLES>> marker.
    line = fin.readline()
    while (line.strip() != '<<EXAMPLES>>'):
        ftmp.write(line)
        line = fin.readline()
        assert (line != '')
    # Write boxes for examples
    nodeCount = 0
    for leaf in node['leaves']:
        # Two boxes per table row.
        if (nodeCount == 0):
            ftmp.write("<tr>\n")
        elif (nodeCount % 2 == 0):
            ftmp.write("</tr><tr>\n")
        ftmp.write("<<SET exampleuri: %s>>\n" % leaf['uri'])
        ftmp.write("<<SET exampleimageuri: %s%s>>\n"
                   % (leaf['uri'], 'output_sm.png'))
        ftmp.write("<<SET exampleimagetxt: %s>>\n" % (leaf['caption']))
        ftmp.write("<<SET examplename: %s>>\n"
                   % leaf['name'].replace("_", " "))
        f = open(os.path.join(options['includedir'], 'examples-box.html'), 'r')
        # UGLY
        for line in f:
            ftmp.write(line)
        f.close()
        nodeCount += 1
    # Pad an odd final row, then close the table row.
    if ((nodeCount > 0) and (nodeCount % 2 == 1)):
        ftmp.write("<td></td>\n")
    if (nodeCount > 0):
        ftmp.write("</tr>\n")
    # Copy the remainder of the template.
    line = fin.readline()
    while (line != ''):
        ftmp.write(line)
        line = fin.readline()
    ftmp.write("EOD\n")
    # Now render the node
    ftmp.seek(0, 0)
    insertObject(ftmp, object, opt, var)
    ftmp.close()
    for leaf in node['leaves']:
        renderExamplesLeaf(leaf, node, tree, opt, var)
def __init__(self, args, skip_valgrind=False, **kwargs):
    """Start an example process"""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    args = list(args)
    # Optionally wrap the command in the class's valgrind arguments.
    if skip_valgrind:
        self.args = args
    else:
        self.args = self.vg_args + args
    self.kwargs = kwargs
    self._out = tempfile.TemporaryFile()
    try:
        Popen.__init__(self, self.args, stdout=self._out, stderr=STDOUT,
                       **kwargs)
    except OSError as e:
        # Distinguish "binary missing" from other launch failures.
        if e.errno == errno.ENOENT:
            raise NotFoundError(self, str(e))
        raise ProcError(self, str(e))
def test_tmpfile(self): if not hasattr(os, "tmpfile"): return # As with test_tmpnam() below, the Windows implementation of tmpfile() # attempts to create a file in the root directory of the current drive. # On Vista and Server 2008, this test will always fail for normal users # as writing to the root directory requires elevated privileges. With # XP and below, the semantics of tmpfile() are the same, but the user # running the test is more likely to have administrative privileges on # their account already. If that's the case, then os.tmpfile() should # work. In order to make this test as useful as possible, rather than # trying to detect Windows versions or whether or not the user has the # right permissions, just try and create a file in the root directory # and see if it raises a 'Permission denied' OSError. If it does, then # test that a subsequent call to os.tmpfile() raises the same error. If # it doesn't, assume we're on XP or below and the user running the test # has administrative privileges, and proceed with the test as normal. with warnings.catch_warnings(): warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning) if sys.platform == 'win32': name = '\\python_test_os_test_tmpfile.txt' if os.path.exists(name): os.remove(name) try: fp = open(name, 'w') except IOError, first: # open() failed, assert tmpfile() fails in the same way. # Although open() raises an IOError and os.tmpfile() raises an # OSError(), 'args' will be (13, 'Permission denied') in both # cases. try: fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) return else: if test_support.check_impl_detail(pypy=False): self.fail("expected os.tmpfile() to raise OSError") # on PyPy, os.tmpfile() uses the tempfile module # anyway, so works even if we cannot write in root. fp.close() else:
def run(self):
    """Collect, via ldd, the external shared libraries every built
    program/library depends on, and write the sorted list to the
    task's output file."""
    # The bare tmpfile() (os.tmpfile) was removed in Python 3;
    # tempfile.TemporaryFile(mode='w+') supports both 'with' and
    # text-mode readlines().
    import tempfile
    bld = self.generator.bld
    linked = []
    target_paths = []
    for g in bld.groups:
        for tgen in g:
            # FIXME it might be better to check if there is a link_task (getattr?)
            target_paths += [tgen.path.get_bld().bldpath()]
            linked += [t.outputs[0].bldpath()
                       for t in getattr(tgen, 'tasks', [])
                       if t.__class__.__name__ in
                       ['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']]
    lib_list = []
    if len(linked):
        cmd = [self.env.LDD] + linked
        # FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32
        ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)}
        with tempfile.TemporaryFile(mode='w+') as result:
            self.exec_command(cmd, env=ldd_env, stdout=result)
            result.seek(0)
            for line in result.readlines():
                words = line.split()
                # ldd lines of interest look like: "libfoo => /path/libfoo (...)"
                if len(words) < 3 or words[1] != '=>':
                    continue
                lib = words[2]
                if lib == 'not':
                    continue
                # Skip build-tree libs, vdso-style entries and excludes.
                if any([lib.startswith(p)
                        for p in [bld.bldnode.abspath(), '(']
                        + self.env.SOFTLINK_EXCLUDE]):
                    continue
                if not isabs(lib):
                    continue
                lib_list.append(lib)
        lib_list = sorted(set(lib_list))
    self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS))
    return 0
def temp_view_enum_ties(self):
    """Dump the aditenumtie table as an mmCIF-style enumeration-ties
    loop into a temp file and return the (unrewound) file object."""
    # os.tmpfile() was removed in Python 3; mode='w+' keeps str writes
    # working as before.
    import tempfile
    if self._verbose:
        sys.stdout.write(self.__class__.__name__ + ".temp_view_enum_ties()\n")
    out = tempfile.TemporaryFile(mode='w+')
    # NOTE(review): internal layout of this header block reconstructed;
    # confirm against the original formatting.
    out.write("""
#-------------- ENUMERATION TIES ----------------
loop_
_enumeration_ties.category_id
_enumeration_ties.item_name
_enumeration_ties.enum_category_id
_enumeration_ties.enum_item_name
""")
    sql = "select categoryId,itemName,enumCategoryId,enumItemName from aditenumtie " \
          + "order by itemName,enumCategoryId"
    curs = self.connection.cursor()
    curs.execute(sql)
    while True:
        row = curs.fetchone()
        if row is None:
            break
        # NULL columns become empty strings; others are whitespace-trimmed.
        if row[0] is None:
            categoryid = ""
        else:
            categoryid = row[0].strip()
        if row[1] is None:
            itemname = ""
        else:
            itemname = row[1].strip()
        if row[2] is None:
            enumcategoryid = ""
        else:
            enumcategoryid = row[2].strip()
        if row[3] is None:
            enumitemname = ""
        else:
            enumitemname = row[3].strip()
        out.write("'%s' '%s' '%s' '%s'\n"
                  % (categoryid, itemname, enumcategoryid, enumitemname))
    curs.close()
    return out
def test_add_rows(self):
    """
    Test the core functionality, adding a number of datarows while
    keeping the row immediatelly below the template row
    """
    # XXX test for xlsx sheets without images inside!
    # XXX test for sparse rows!
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    filler = self.get_mangler()
    filler.copy_sheet('Fancyname', 'Fancycopy')
    schema = [('field1', 'url'),
              ('field2', 'string'),
              ('field3', 'string')]
    data = [(('http://wwww.example.com', 'link'), 'example', 'ex1', 'ex2'),
            (('http://www.example.com/2', 'link2'), 'example', 'ex3', 'ex4')]
    filler.add_rows('Fancycopy', schema, data)
    tmpfile = tempfile.TemporaryFile()
    filler.save(tmpfile)
    #tmpfile.seek(0)
    #file('test_result_rows_added.xlsx', 'w').write(tmpfile.read())
    self.assertZipEquals(file('test_result_rows_added.xlsx'), tmpfile)
def _deliver_qmaillocal(self, msg, msginfo, delivered_to, received,
                        stdout, stderr):
    """Child-process half of qmail-local delivery: wire up stdin from
    the flattened message and exec qmail-local. Never returns; exits
    127 on failure."""
    # os.tmpfile() was removed in Python 3; use tempfile.TemporaryFile.
    import tempfile
    try:
        args = (self.conf['qmaillocal'], self.conf['qmaillocal'], '--',
                self.conf['user'], self.conf['homedir'], msginfo['local'],
                msginfo['dash'], msginfo['ext'], self.conf['localdomain'],
                msginfo['sender'], self.conf['defaultdelivery'])
        self.log.debug('about to execl() with args %s\n' % str(args))
        # Modify message
        if self.conf['strip_delivered_to']:
            msg.remove_header('delivered-to')
            # Also don't insert a Delivered-To: header.
            delivered_to = None
        # Write out message
        msgfile = tempfile.TemporaryFile()
        msgfile.write(msg.flatten(delivered_to, received))
        msgfile.flush()
        os.fsync(msgfile.fileno())
        # Rewind
        msgfile.seek(0)
        # Set stdin to read from this file
        os.dup2(msgfile.fileno(), 0)
        # Set stdout and stderr to write to files
        os.dup2(stdout.fileno(), 1)
        os.dup2(stderr.fileno(), 2)
        change_usergroup(self.log, self.conf['user'], self.conf['group'])
        # At least some security...
        if ((os.geteuid() == 0 or os.getegid() == 0)
                and not self.conf['allow_root_commands']):
            raise getmailConfigurationError(
                'refuse to invoke external commands as root or GID 0 by default')
        os.execl(*args)
    except StandardError as o:
        # Child process; any error must cause us to exit nonzero for parent
        # to detect it
        stderr.write('exec of qmail-local failed (%s)' % o)
        stderr.flush()
        os.fsync(stderr.fileno())
        os._exit(127)
def eggnog(input, output_dir):
    """Annotate a protein FASTA with eggNOG-mapper.

    Splits `input` into 100-line chunks under <output_dir>/eggNOG_split,
    runs emapper.py on the chunks four at a time, then concatenates the
    results into <output_dir>/eggNOG/eggNOG_combined.txt.

    Returns the path of the combined annotation file.
    """
    import tempfile
    splitLen = 100          # lines per chunk fed to emapper.py
    at = 1                  # running chunk index used in filenames
    os.system("mkdir " + output_dir + "/eggNOG")
    os.system("mkdir " + output_dir + "/eggNOG_split")
    outputBase = output_dir + "/eggNOG_split/eggNOG_split"
    eggNOG_annotation_path = output_dir + "/eggNOG"
    input_split = open(input, "r").read().split("\n")
    for lines in range(0, len(input_split), splitLen):
        outputData = input_split[lines:lines + splitLen]
        output = open(outputBase + str(at) + '.txt', 'w')
        output.write('\n'.join(outputData))
        output.close()
        at += 1
    hold_files = os.listdir(output_dir + "/eggNOG_split")
    # Run at most four emapper processes concurrently.
    multi_list = [hold_files[x:x + 4] for x in range(0, len(hold_files), 4)]
    for x in multi_list:
        processes = []
        for files in x:
            prefix = files.split("_")[1]
            file_path = output_dir + "/eggNOG_split/" + files
            call_eggNog = [
                "emapper.py", "-i", file_path,
                "--data_dir", "../Tools/eggnog_db",
                "-m", "diamond",
                "--dmnd_db",
                "/home/projects/group-c/Team3-FunctionalAnnotation/Tools/eggnog_db/eggnog_proteins.dmnd",
                "--translate", "-d", "bact",
                "--output_dir", eggNOG_annotation_path,
                "-o", "eggNOG_" + prefix
            ]
            # os.tmpfile() was removed in Python 3; use tempfile instead.
            f = tempfile.TemporaryFile()
            p = subprocess.Popen(call_eggNog, stdout=f)
            processes.append((p, f))
        for p, f in processes:
            p.wait()
            f.seek(0)
            f.close()
    command = ("for i in " + output_dir + "/eggNOG_split; do cat $i > " +
               output_dir + "/eggNOG/eggNOG_combined.txt ; done")
    # BUG FIX: original called os.system(commnand) -- a NameError typo.
    os.system(command)
    return (output_dir + '/eggNOG/eggNOG_combined.txt')
def _initialize(self, hostname, user="******", port=22, password="",
                *args, **dargs):
    """Initialize SSH connection parameters for this host.

    Resolves the host's IP, stores credentials, and creates a private
    known-hosts scratch file exposed to ssh via its /dev/fd/<n> path.
    """
    import tempfile
    super(AbstractSSHHost, self)._initialize(hostname=hostname,
                                             *args, **dargs)
    self.ip = socket.getaddrinfo(self.hostname, None)[0][4][0]
    self.user = user
    self.port = port
    self.password = password
    self._use_rsync = None
    # os.tmpfile() was removed in Python 3; TemporaryFile is equivalent:
    # an anonymous (unlinked) file, still reachable through /dev/fd/<fd>.
    self.known_hosts_file = tempfile.TemporaryFile()
    known_hosts_fd = self.known_hosts_file.fileno()
    self.known_hosts_fd = '/dev/fd/%s' % known_hosts_fd
    # Master SSH connection background job, socket temp directory and
    # socket control path option.  If master-SSH is enabled, these fields
    # will be initialized by start_master_ssh when a new SSH connection
    # is initiated.
    self.master_ssh_job = None
    self.master_ssh_tempdir = None
    self.master_ssh_option = ''
def get_post_as_string(self, remotefile, body):
    """Do an http POST, send body, get response and return it.

    Raises via self.__err on any non-200 status; the response entity is
    spooled through a temporary file and returned as a string.
    """
    import tempfile
    self.log('get_post_as_string %s' % remotefile)
    headers = {'Content-Type': 'text/xml; charset="utf-8"'}
    response = self.request('POST', remotefile, body, headers, False)
    if response.status != 200:
        self.__err(response, ('POST', remotefile, body, headers, 0))
    # Content-Length may be absent; getheader() then returns None and
    # int(None) raises TypeError.
    try:
        content_length = int(response.getheader('Content-Length'))
    except TypeError:
        content_length = None
    # os.tmpfile() was removed in Python 3; use tempfile instead.
    tmp_handle = tempfile.TemporaryFile()
    self.__get_file_read(tmp_handle, response, content_length)
    tmp_handle.seek(0)
    ret = tmp_handle.read()
    tmp_handle.close()
    return ret
def test_db_create(self):
    """create_db should produce a schema sqlite3 can read back directly."""
    import tempfile
    # BUG FIX: os.tmpfile() returns an open *file object*, but both
    # db.create_db() and sqlite3.connect() expect a file *path*.
    # mkstemp() yields a real filename; close the fd, we only need the path.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        db.create_db(filename)
        cliente = orm.Cliente()
        cliente.nombre = "cliente"
        cliente.direccion = "somewhere"
        cliente.telefono = '555'
        cliente.tipo = orm.TipoClienteEnum.MAYORITARIO
        session = db.session()
        session.add(cliente)
        session.commit()
        # test input with sqlite
        conn = sqlite3.connect(filename)
        c = conn.cursor()
        result = c.execute('select * from cliente')
        r = result.fetchone()
        self.assertEqual(r[1], cliente.nombre)
        self.assertEqual(r[2], cliente.telefono)
        self.assertEqual(r[3], cliente.direccion)
        self.assertEqual(r[4], cliente.tipo.name)
    finally:
        os.remove(filename)
def open(self):
    """Open the backing resource for this PHP stream wrapper.

    Maps the php:// pseudo-filenames onto real OS resources (duplicated
    standard fds, an in-memory buffer, a temp file, or an arbitrary fd),
    falling back to a regular filesystem open, then registers the stream
    with the interpreter.
    """
    import tempfile
    if self.filename == 'php://memory':
        self.resource = RMemoryFile()
    elif self.filename == 'php://stdin':
        self.resource = os.fdopen(os.dup(0), 'r')
    elif self.filename == 'php://stdout':
        self.resource = os.fdopen(os.dup(1), 'w')
    elif self.filename == 'php://output':
        self.resource = os.fdopen(os.dup(1), 'w')
    elif self.filename == 'php://stderr':
        self.resource = os.fdopen(os.dup(2), 'w')
    elif self.filename == 'php://temp':
        # os.tmpfile() was removed in Python 3; use tempfile instead.
        self.resource = tempfile.TemporaryFile()
    elif self.filename.startswith('php://fd/'):
        fd = self.filename[len('php://fd/'):]
        self.resource = os.fdopen(os.dup(int(fd)), self.mode)
    else:
        self.resource = open(self.filename, self.mode)
    self.resource.seek(0, 0)
    self.state = OPEN
    self.space.ec.interpreter.register_fd(self)
def test_add_many_rows(self):
    """
    Test that adding lots of rows will not raise errors.
    """
    # XXX test for xlsx sheets without images inside!
    # XXX test for sparse rows!
    import tempfile
    filler = self.get_mangler()
    filler.copy_sheet('Fancyname', 'Fancycopy')
    schema = [('field1', 'url'), ('field2', 'string'), ('field3', 'string')]
    data = [(('http://wwww.example.com', 'link'), 'example', 'ex1', 'ex2')
            for ignore in range(100)]
    filler.add_rows('Fancycopy', schema, data)
    # os.tmpfile() was removed in Python 3; tempfile.TemporaryFile() is the
    # portable equivalent (anonymous temp file, deleted on close).
    tmpfile = tempfile.TemporaryFile()
    filler.save(tmpfile)
    # file() builtin was removed in Python 3; open() is equivalent here.
    self.assertZipEquals(open('test_result_rows_added_many_rows.xlsx'),
                         tmpfile)
def setBootMode(mode):
    """Rewrite the SELINUX= line of the SELinux config file for `mode`.

    Returns SEEDIT_SUCCESS on success (or if `mode` is already active),
    SEEDIT_ERROR if the config file cannot be read or written.
    """
    import tempfile
    if mode == getBootMode():
        return SEEDIT_SUCCESS
    # os.tmpfile() was removed in Python 3; use tempfile instead.
    tmpfh = tempfile.TemporaryFile()
    try:
        fh = open(gSELinuxConfigFile, 'r')
        lines = fh.readlines()
        fh.close()
    # Narrowed from a bare except: only file-access failures should map
    # to SEEDIT_ERROR.
    except (IOError, OSError):
        tmpfh.close()
        return SEEDIT_ERROR
    pat = re.compile(r"^[\s\t]*SELINUX[\s\t]*=")
    # Stage the rewritten config in the temp file first, so the real file
    # is only touched once we have the full replacement text.
    for line in lines:
        m = pat.search(line)
        if m:
            if mode == ENFORCING:
                tmpfh.write("SELINUX=enforcing\n")
            elif mode == PERMISSIVE:
                tmpfh.write("SELINUX=permissive\n")
            else:
                tmpfh.write("SELINUX=disabled\n")
        else:
            tmpfh.write(line)
    tmpfh.seek(0)
    lines = tmpfh.readlines()
    try:
        fh = open(gSELinuxConfigFile, 'w')
    except (IOError, OSError):
        tmpfh.close()
        return SEEDIT_ERROR
    for line in lines:
        fh.write(line)
    fh.close()
    return SEEDIT_SUCCESS
def image(request):
    """Django view: plot recent measurements for the requested guests.

    Query params: guests (comma-separated), measure, duration (seconds,
    default 100).  Only points within `duration` seconds of each guest's
    newest sample are plotted.  Returns the chart as a PNG response.
    """
    import tempfile
    guests = request.GET.get('guests', '')
    guests = guests.split(',')
    if guests == ['']:
        guests = []
    measure = request.GET.get('measure', None)
    duration = int(request.GET.get('duration', 100))
    pyplot.clf()
    for guest in guests:
        times = []
        values = []
        measurements = Measurement.objects.filter(guest=guest,
                                                  measure=measure)
        # First pass: find the newest sample time for this guest.
        maxTime = datetime.datetime(1900, 1, 1)
        for measurement in measurements:
            if maxTime < measurement.time:
                maxTime = measurement.time
        minTime = maxTime - datetime.timedelta(seconds=duration)
        # Second pass: keep only samples within the duration window.
        for measurement in measurements:
            if minTime <= measurement.time:
                times.append(measurement.time)
                values.append(measurement.value)
        pyplot.plot(times, values, 'o-', label=guest)
    pyplot.xlabel('time')
    pyplot.ylabel('value')
    pyplot.title(measure)
    pyplot.legend()
    # os.tmpfile() was removed in Python 3; use tempfile instead.
    tmpfile = tempfile.TemporaryFile()
    pyplot.savefig(tmpfile, dpi=(640 / 8))
    tmpfile.seek(0)
    data = tmpfile.read()
    tmpfile.close()
    return HttpResponse(data, mimetype="image/png")
def test_fromdb(self):
    """fromDb should copy the uploaded db file and populate job metadata."""
    import tempfile
    # mock/stub private methods which do external calls
    self.factory._makeJobDir = Mock(return_value='/mydir')
    self.factory._copyFile = Mock()
    self.factory._makeJobSession = Mock(return_value=initTestingDB())
    # os.tmpfile() was removed in Python 3; use tempfile instead.
    dbfile = tempfile.TemporaryFile()
    job = self.factory.fromDb(dbfile, u'bob')
    self.assertIsInstance(job.id, uuid.UUID)
    self.assertEqual(job.dir, u'/mydir')
    self.assertEqual(job.meta.owner, u'bob')
    self.assertEqual(job.meta.description, u'My first description')
    self.assertEqual(job.meta.ms_filename, u'F123456.mzxml')
    self.assertEqual(job.meta.state, u'STOPPED')
    self.factory._makeJobDir.assert_called_with(job.id)
    self.factory._copyFile.assert_called_with(dbfile, job.id)
    o = mu.DBSession().query(mu.JobMeta.owner
        ).filter(mu.JobMeta.jobid == job.id).scalar()
    self.assertEqual(o, u'bob', 'job meta has been inserted')
def _create_filehandle(name, mode, position, closed, open=open): # buffering=0 # only pickles the handle, not the file contents... good? or StringIO(data)? # (for file contents see: http://effbot.org/librarybook/copy-reg.htm) # NOTE: handle special cases first (are there more special cases?) names = {'<stdin>':sys.__stdin__, '<stdout>':sys.__stdout__, '<stderr>':sys.__stderr__} #XXX: better fileno=(0,1,2) ? if name in list(names.keys()): f = names[name] #XXX: safer "f=sys.stdin" elif name == '<tmpfile>': import os; f = os.tmpfile() elif name == '<fdopen>': import tempfile; f = tempfile.TemporaryFile(mode) else: try: # try to open the file by name # NOTE: has different fileno f = open(name, mode)#FIXME: missing: *buffering*, encoding,softspace except IOError: err = sys.exc_info()[1] try: # failing, then use /dev/null #XXX: better to just fail here? import os; f = open(os.devnull, mode) except IOError: raise UnpicklingError(err) #XXX: python default is closed '<uninitialized file>' file/mode if closed: f.close() elif position >= 0: f.seek(position) return f
def runtest(self):
    """Build and run the command line for this test invocation.

    Chooses an interpreter from the file extension, optionally cd's to
    the test's directory first, captures one output stream in a temp
    file (parsed by collect_results) and the other in
    <basename>.testresult on disk.  Stores the shifted exit status in
    self.exit_status.
    """
    import tempfile
    dirname = self.invocation.dirname
    basename = self.invocation.basename
    extension = self.invocation.extension
    switches = self.invocation.switches
    arguments = self.invocation.arguments
    change_dir = ""
    # Per-invocation cd_to_file overrides the scoreboard-wide default.
    if (self.invocation.cd_to_file != None):
        cd_to_file = self.invocation.cd_to_file
    else:
        cd_to_file = self.scoreboard.config.cd_to_file
    lang = self.extension_to_language(extension)
    if cd_to_file:
        if dirname != "" and dirname != ".":
            command = "cd %s; %s %s%s %s %s" % (
                dirname, lang, basename, extension, switches, arguments)
            tmpfile = "%s/%s.testresult" % (dirname, basename)
        else:
            command = "%s %s%s %s %s" % (
                lang, basename, extension, switches, arguments)
            tmpfile = "./%s.testresult" % (basename)
    else:
        command = "%s %s/%s%s %s %s" % (
            lang, dirname, basename, extension, switches, arguments)
        tmpfile = "%s/%s.testresult" % (dirname, basename)
    # os.tmpfile() was removed in Python 3; use tempfile instead.
    filehandle2 = tempfile.TemporaryFile()
    filehandle3 = open(tmpfile, "w")
    self.exit_status = LaunchTest(command, filehandle2, filehandle3)
    if self.exit_status:
        # Extract the process exit code from the wait()-style status word.
        self.exit_status = self.exit_status >> 8
    filehandle2.seek(0)
    self.collect_results(filehandle2)
    filehandle2.close()
    filehandle3.close()
def send_request(request):
    """POST `request` to the configured server via curl and parse the JSON.

    Returns the decoded response dict when its "status" field is "ok",
    otherwise prints a diagnostic and returns None.  Re-raises if curl
    itself cannot be started.
    """
    import tempfile
    try:
        # os.tmpfile() was removed in Python 3; use tempfile instead.
        response = tempfile.TemporaryFile()
        return_code = subprocess.Popen(
            ["curl", "-q", "--negotiate", "-s", "-u", ":", "--insecure",
             "-w", "", "--url", server + request],
            bufsize=0, stdout=response, stderr=subprocess.STDOUT,
            close_fds=True, shell=False).wait()
    except:
        print("Could not run curl")
        raise
    if return_code:
        print("Communication with server failed")
        return None
    response.seek(0)
    response = json.load(response)
    if "status" not in response:
        print('Strange json response without "status"')
        print(response)
        return None
    if response["status"] == "error":
        print("Server responded with error status")
        print(response)
        return None
    if response["status"] != "ok":
        print("Server responded with strange status")
        print(response)
        return None
    return response
def _deliver_command(self, msg, msginfo, delivered_to, received,
                     stdout, stderr):
    """Exec an arbitrary external delivery command in the forked child.

    Runs in a child process: writes the flattened message to a temp
    file wired to stdin, redirects stdout/stderr to the capture files,
    drops privileges, expands per-message substitutions into the
    configured argument list, then replaces this process via execl().
    Never returns; any failure exits 127 so the parent can detect it.
    """
    try:
        # Write out message with native EOL convention
        # NOTE(review): os.tmpfile() only exists on Python 2; it was
        # removed in Python 3 (tempfile.TemporaryFile is the replacement).
        msgfile = os.tmpfile()
        msgfile.write(msg.flatten(delivered_to, received,
                                  include_from=self.conf['unixfrom']))
        msgfile.flush()
        # fsync so the data is on disk before the command reads fd 0.
        os.fsync(msgfile.fileno())
        # Rewind
        msgfile.seek(0)
        # Set stdin to read from this file
        os.dup2(msgfile.fileno(), 0)
        # Set stdout and stderr to write to files
        os.dup2(stdout.fileno(), 1)
        os.dup2(stderr.fileno(), 2)
        change_usergroup(self.log, self.conf['user'], self.conf['group'])
        # At least some security...
        if ((os.geteuid() == 0 or os.getegid() == 0) and
                not self.conf['allow_root_commands']):
            raise getmailConfigurationError(
                'refuse to invoke external commands as root or GID 0 by default'
            )
        # argv[0] is repeated because execl(path, arg0, ...) takes the
        # program path first and then the argv list starting at arg0.
        args = [self.conf['path'], self.conf['path']]
        for arg in self.conf['arguments']:
            arg = expand_user_vars(arg)
            # Substitute %(local), %(sender), etc. from message info.
            for (key, value) in msginfo.items():
                arg = arg.replace('%%(%s)' % key, value)
            args.append(arg)
        self.log.debug('about to execl() with args %s\n' % str(args))
        os.execl(*args)
    except StandardError, o:
        # Child process; any error must cause us to exit nonzero for parent
        # to detect it
        stderr.write('exec of command %s failed (%s)'
                     % (self.conf['command'], o))
        stderr.flush()
        os.fsync(stderr.fileno())
        os._exit(127)
def __parse_args(self, chunk):
    """
    Parse request header.  Defer to __parse_body if request entity
    body is indicated.

    Consumes at most one header line from `chunk` and returns the
    number of bytes consumed (0 if no complete line is available yet).
    """
    import tempfile
    eol = chunk.find('\n') + 1
    if eol == 0:
        # No complete line buffered yet; ask caller for more data.
        return 0
    line = chunk[:eol]
    if ':' in line:
        Params.log('> ' + line.rstrip(), 2)
        key, value = line.split(':', 1)
        # Normalize known header names via the map; title-case unknowns.
        if key.lower() in HTTP.Header_Map:
            key = HTTP.Header_Map[key.lower()]
        else:
            Params.log(
                "Warning: %r not a known HTTP (request) header (%r)" % (
                    key, value.strip()), 1)
            key = key.title()
        assert key not in self.__headers, 'duplicate req. header: %s' % key
        self.__headers[key] = value.strip()
    elif line in ('\r\n', '\n'):
        # Blank line ends the header section.
        self.__size = int(self.__headers.get('Content-Length', 0))
        if self.__size:
            assert self.__verb == 'POST', \
                '%s request conflicts with message body' % self.__verb
            Params.log('Opening temporary file for POST upload', 1)
            # os.tmpfile() was removed in Python 3; use tempfile instead.
            self.__body = tempfile.TemporaryFile()
            self.__parse = self.__parse_body
        else:
            self.__body = None
            self.__parse = None
    else:
        Params.log('Warning: Ignored header line: %r' % line)
    return eol
def runVncViewer(domid, do_autopass, do_daemonize=False):
    """Launch vncviewer against the given domain's VNC console.

    Reads the VNC port (and, with do_autopass, the password) from
    xenstore.  The password, when present, is staged in a temp file fed
    to vncviewer's stdin via -autopass.  Either daemonizes the viewer or
    exec's it in place; on exec failure the process exits.
    """
    import tempfile
    xs = OurXenstoreConnection()
    d = '/local/domain/%d/' % domid
    vnc_port = xs.read_eventually(d + 'console/vnc-port')
    vfb_backend = xs.read_maybe(d + 'device/vfb/0/backend')
    vnc_listen = None
    vnc_password = None
    vnc_password_tmpfile = None
    cmdl = ['vncviewer']
    if vfb_backend is not None:
        vnc_listen = xs.read_maybe(vfb_backend + '/vnclisten')
        if do_autopass:
            vnc_password = xs.read_maybe(vfb_backend + '/vncpasswd')
        if vnc_password is not None:
            cmdl.append('-autopass')
            # os.tmpfile() was removed in Python 3; use tempfile instead.
            vnc_password_tmpfile = tempfile.TemporaryFile()
            # Replaces Py2-only "print >> tmpfile, vnc_password".
            vnc_password_tmpfile.write(vnc_password + '\n')
            vnc_password_tmpfile.seek(0)
            vnc_password_tmpfile.flush()
    if vnc_listen is None:
        vnc_listen = 'localhost'
    # VNC display numbers are port - 5900.
    cmdl.append('%s:%d' % (vnc_listen, int(vnc_port) - 5900))
    if do_daemonize:
        pid = utils.daemonize('vncviewer', cmdl, vnc_password_tmpfile)
        if pid == 0:
            sys.stderr.write('failed to invoke vncviewer\n')
            os._exit(-1)
    else:
        sys.stdout.write('invoking  %s\n' % ' '.join(cmdl))
        if vnc_password_tmpfile is not None:
            # Feed the password file to vncviewer's stdin for -autopass.
            os.dup2(vnc_password_tmpfile.fileno(), 0)
        try:
            os.execvp('vncviewer', cmdl)
        except OSError:
            sys.stderr.write('Error: external vncviewer missing or not '
                             'in the path\nExiting\n')
            os._exit(-1)
def load_script(path, module_name):
    """Load a migration script at `path` as module `module_name`.

    Opens the script through tools.file_open and imports it with
    imp.load_module so the module keeps a real frame (breakpoints work).
    """
    fp, fname = tools.file_open(path, pathinfo=True)
    fp2 = None

    # OpenUpgrade edit start:
    # Don't copy the migration script into a temp directory. Instead,
    # replace the call to load_source with a call to load_module so that
    # the frame isn't lost and breakpoints can be set
    # pylint: disable=file-builtin,undefined-variable
    # NOTE(review): this branch is deliberately dead ("if False and ...");
    # it is kept only to document the upstream behavior that was disabled.
    # It also relies on Python-2-only names: the `file` builtin and
    # os.tmpfile() (both removed in Python 3).
    if False and not isinstance(fp, file):
        # imp.load_source need a real file object, so we create
        # one from the file-like object we get from file_open
        fp2 = os.tmpfile()
        fp2.write(fp.read())
        fp2.seek(0)
    try:
        return imp.load_module(module_name, fp, fname,
                               ('.py', 'r', imp.PY_SOURCE))
    finally:
        # Always release both handles, even if the import raises.
        if fp:
            fp.close()
        if fp2:
            fp2.close()
def run_updates(staging, cores):
    """
    run update.php on each wiki found in dblist

    Launches at most `cores` concurrent mwscript subprocesses, each with
    its combined stdout/stderr captured in a temp file, waiting for each
    batch via do_wait before starting the next.
    """
    import tempfile
    procs = []
    with open(staging, "r") as dblist:
        dbs = ignore_comments_and_emptylines(dblist)
        for db in dbs:
            # os.tmpfile() was removed in Python 3; use tempfile instead.
            f = tempfile.TemporaryFile()
            cmd = (
                "echo '%(db)s'; /usr/local/bin/mwscript update.php --wiki=%(db)s --quick"
                % {'db': db})
            p = subprocess.Popen(cmd, stdout=f, stderr=f, shell=True)
            procs.append((p, f, cmd))
            if (len(procs) >= cores):
                do_wait(procs)
                procs = []
        # catches odd cases where dblist file is smaller than batch size
        if (len(procs) > 0):
            do_wait(procs)