Example #1
    @classmethod
    def setUpClass(cls):
        cls.empty_file = tempfile.mkstemp()
        cls.empty_dir = tempfile.mkdtemp()
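        # note: mkstemp() returns an (fd, path) tuple; storing it whole keeps
        # the OS-level descriptors open for the lifetime of the test class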

        # Create one good textfile
        cls.good_file = tempfile.mkstemp()

        # Create a color image
        cls.color_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_color = np.ones((8,10,3), dtype='uint8')
        cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
        cls.pil_image_color.save(cls.color_image_file[1])

        # Create a grayscale image
        cls.gray_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_gray = np.ones((8,10), dtype='uint8')
        cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
        cls.pil_image_gray.save(cls.gray_image_file[1])

        cls.image_count = 0
        for i in xrange(3):
            for j in xrange(3):
                os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i))
                os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i))
                cls.image_count += 2
Example #2
def PlotFit( g, data, cols=(0,1) ):

    fh1, fn1 = tempfile.mkstemp()
    a,b = cols
    os.close(fh1)
    outfile = open(fn1, "w")
    for d in data: outfile.write("%f\t%f\n" % (d[a], d[b]))
    outfile.close()
    
    parameters = {}
    fh2, fn2 = tempfile.mkstemp()
    fh3, fn3 = tempfile.mkstemp()    
    os.close(fh2)
    os.close(fh3)
    open(fn2, 'w').write('m=0\nb=0\n')
    g("f%i(x) = m * x + y0" % b) 
    g("fit f%i(x) '%s' using 1:2 via y0, m" % (b, fn1))
    g("replot f%i(x)" % (b))
    
##     g('fit m*x+b "%s" via "%s"' % (fn1, fn2) )    
##     g('update "%s" "%s"' % (fn2, fn3))
##     execfile( fn3, globals(), parameters )
##     g.replot( Gnuplot.Func( "%f*x + %f" % (parameters['m'], parameters['b']) ) )
        
    return [fn1, fn2, fn3]
Example #3
def tag_raw_data(raw) :
    (fd, temp) = tempfile.mkstemp()
    (rfd, rtemp) = tempfile.mkstemp()
    f = os.fdopen(fd, 'w')  # mkstemp returns an OS-level fd, so wrap it for writing
    f.writelines([r + '\n' for r in raw])
    f.close()

    # call the external tagger, sending its stdout to the second temp file
    print(TAG_COMMAND.format(temp))
    f = os.fdopen(rfd)
    result = subprocess.call(TAG_COMMAND.format(temp), stdout=f, cwd=TAGGER_PATH, shell=True)
    #read them back in
    f.seek(0)
    result = f.readlines()
    f.close()
    result = [x.strip() for x in result]


    final_result = []
    buf = []
    for i in range(len(result)) :
        if (result[i] == '') :
            final_result.append(tuple(buf)) #you have to cut off that extra ' '
            buf = []
        else :
            pieces = result[i].split('\t')
            buf.append( (pieces[0],pieces[1] ) )

    if (buf != []) :
        final_result.append(tuple(buf))

    
    return final_result
Example #4
def compile_and_load(src_files, obj_files=[], cc="g++", flags=["O3", "Wall"], includes=[], links=[], defs=[]):
    """ Compile and load a shared object from a source file
  \param src_files list of source files (eg. ['~/src/foo.cc'])
  \param cc the path to the c++ compiler
  \param obj_files extra object files to link into the library (eg. ['~/obj/foo.o'])
  \param flags list of flags for the compiler (eg. ['O3', 'Wall'])
  \param includes list of directories to include (eg. ['~/includes/'])
  \param links list of libraries to link with (eg. ['pthread', 'gtest'])
  \param defs list of names to define with -D (eg. ['ENABLE_FOO'])
  \return (lib, fin) link to the library and a function to call to close the library
  """
    __, obj_name = tempfile.mkstemp(suffix=".o", dir=TEMP_DIR)
    os.close(__)
    __, lib_name = tempfile.mkstemp(suffix=".so", dir=TEMP_DIR)
    os.close(__)

    compile_bin(obj_name, src_files, cc=cc, flags=flags, includes=includes, links=links, defs=defs, lib=True)
    # add the newly compiled object file to the list of objects for the lib
    obj_files = list(obj_files)
    obj_files.append(obj_name)
    compile_so(lib_name, obj_files, cc=cc, links=links)

    def finalize():
        if os.path.exists(obj_name):
            os.unlink(obj_name)
        if os.path.exists(lib_name):
            os.unlink(lib_name)

    try:
        lib = ctypes.CDLL(lib_name)
        return lib, finalize
    except OSError:
        print "Failed link with library, source files:"
        print ", ".join(src_files)
        raise
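A hypothetical usage sketch for compile_and_load above; the source file name and the exported symbol 'answer' are assumptions, not part of the original example:

lib, finalize = compile_and_load(["foo.cc"])
try:
    lib.answer.restype = ctypes.c_int  # 'answer' is an assumed C symbol
    print(lib.answer())
finally:
    finalize()  # removes the temporary .o and .so created via mkstemp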
Example #5
 def print_xcf(self, filename_or_obj, *args, **kwargs):
     "Writes the figure to a GIMP XCF image file"
     # If filename_or_obj is a file-like object we need a temporary file for
     # GIMP's output too...
     if is_string(filename_or_obj):
         out_temp_handle, out_temp_name = None, filename_or_obj
     else:
         out_temp_handle, out_temp_name = tempfile.mkstemp(suffix='.xcf')
     try:
         # Create a temporary file and write the "layer" to it as a PNG
         in_temp_handle, in_temp_name = tempfile.mkstemp(suffix='.png')
         try:
             FigureCanvasAgg.print_png(self, in_temp_name, *args, **kwargs)
             run_gimp_script(
                 SINGLE_LAYER_SCRIPT.format(
                     input=quote_string(in_temp_name),
                     output=quote_string(out_temp_name)))
         finally:
             os.close(in_temp_handle)
             os.unlink(in_temp_name)
     finally:
         if out_temp_handle:
             os.close(out_temp_handle)
             # If we wrote the XCF to a temporary file, write its content to
             # the file-like object we were given (the copy is chunked as
             # XCF files can get pretty big)
             with open(out_temp_name, 'rb') as source:
                 for chunk in iter(lambda: source.read(131072), b''):  # b'' sentinel matches the binary read
                     filename_or_obj.write(chunk)
             os.unlink(out_temp_name)
Example #6
def ndiff(scan_a, scan_b):
    """Run Ndiff on two scan results, which may be filenames or NmapParserSAX
    objects, and return a running NdiffCommand object."""
    temporary_filenames = []

    if isinstance(scan_a, NmapParserSAX):
        fd, filename_a = tempfile.mkstemp(
                prefix=APP_NAME + "-diff-",
                suffix=".xml"
                )
        temporary_filenames.append(filename_a)
        f = os.fdopen(fd, "wb")
        scan_a.write_xml(f)
        f.close()
    else:
        filename_a = scan_a

    if isinstance(scan_b, NmapParserSAX):
        fd, filename_b = tempfile.mkstemp(
                prefix=APP_NAME + "-diff-",
                suffix=".xml"
                )
        temporary_filenames.append(filename_b)
        f = os.fdopen(fd, "wb")
        scan_b.write_xml(f)
        f.close()
    else:
        filename_b = scan_b

    return NdiffCommand(filename_a, filename_b, temporary_filenames)
Example #7
def _execute(cmds):
    import subprocess
    stderrPath = tempfile.mkstemp()[1]
    stdoutPath = tempfile.mkstemp()[1]
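    # note: taking only [1] discards the OS-level descriptors returned by
    # mkstemp(), so they stay open until the interpreter exits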
    stderrFile = open(stderrPath, "w")
    stdoutFile = open(stdoutPath, "w")
    # get the os.environ
    env = _makeEnviron()
    # make a string of escaped commands
    cmds = subprocess.list2cmdline(cmds)
    # go
    popen = subprocess.Popen(cmds, stderr=stderrFile, stdout=stdoutFile, env=env, shell=True)
    popen.wait()
    # get the output
    stderrFile.close()
    stdoutFile.close()
    stderrFile = open(stderrPath, "r")
    stdoutFile = open(stdoutPath, "r")
    stderr = stderrFile.read()
    stdout = stdoutFile.read()
    stderrFile.close()
    stdoutFile.close()
    # trash the temp files
    os.remove(stderrPath)
    os.remove(stdoutPath)
    # done
    return stderr, stdout
Example #8
    def test_H_get_put(self, sftp):
        """
        verify that get/put work.
        """
        warnings.filterwarnings('ignore', 'tempnam.*')

        fd, localname = mkstemp()
        os.close(fd)
        text = b'All I wanted was a plastic bunny rabbit.\n'
        with open(localname, 'wb') as f:
            f.write(text)
        saved_progress = []

        def progress_callback(x, y):
            saved_progress.append((x, y))
        sftp.put(localname, sftp.FOLDER + '/bunny.txt', progress_callback)

        with sftp.open(sftp.FOLDER + '/bunny.txt', 'rb') as f:
            assert text == f.read(128)
        assert [(41, 41)] == saved_progress

        os.unlink(localname)
        fd, localname = mkstemp()
        os.close(fd)
        saved_progress = []
        sftp.get(sftp.FOLDER + '/bunny.txt', localname, progress_callback)

        with open(localname, 'rb') as f:
            assert text == f.read(128)
        assert [(41, 41)] == saved_progress

        os.unlink(localname)
        sftp.unlink(sftp.FOLDER + '/bunny.txt')
Example #9
    def take_screenshot(self, filename):
        """Take a screenshot and save it to 'filename'"""

        # make sure filename is unicode
        filename = ensure_unicode(filename, "utf-8")

        if get_platform() == "windows":
            # use win32api to take screenshot
            # create temp file
            
            f, imgfile = tempfile.mkstemp(u".bmp", filename)
            os.close(f)
            mswin.screenshot.take_screenshot(imgfile)
        else:
            # use external app for screen shot
            screenshot = self.get_external_app("screen_shot")
            if screenshot is None or screenshot.prog == "":
                raise Exception(_("You must specify a Screen Shot program in Application Options"))

            # create temp file
            f, imgfile = tempfile.mkstemp(".png", filename)
            os.close(f)

            proc = subprocess.Popen([screenshot.prog, imgfile])
            if proc.wait() != 0:
                raise OSError("Exited with error")

        if not os.path.exists(imgfile):
            # catch error if image is not created
            raise Exception(_("The screenshot program did not create the necessary image file '%s'") % imgfile)

        return imgfile  
Example #10
 def _test_batch(self, setsize, batchsize):
     # Create a set of testfiles
     dummyfiles = []
     for i in range(setsize):
         fh, dummyfile = tempfile.mkstemp(suffix=".batcher_test", dir=self._test_folder)
         os.close(fh)
         dummyfiles.append(dummyfile)
     # Create a file that should not be returned
     fh, nomatchfile = tempfile.mkstemp(suffix=".batcher_nomatch_test", dir=self._test_folder)
     os.close(fh)
     
     # Pattern 
     pattern = os.path.join(self._test_folder, "*.batcher_test")
     batcher = Batcher(pattern,batchsize)
     
     expected_bno = 0
     for b in batcher:
         expected_size = min(batchsize, setsize - batchsize*expected_bno)
         expected_bno += 1
         # Assert that each batch has the expected size
         self.assertEqual(len(b),expected_size, "The returned batchsize does not match the expected size ({} != {})".format(len(b),expected_size))
         # Assert that the batch enumerator gives the correct batch number
         self.assertEqual(batcher.batchno(),expected_bno, "The returned batch number does not match the expected ({} != {})".format(batcher.batchno(),expected_bno))
 
         for bf in b:
             # Assert that the returned files existed in the original array and has not already been returned 
             self.assertIn(bf, dummyfiles, "The returned file is not present in the expected set ({})".format(bf))
             self.assertTrue(os.path.exists(bf), "The returned file has already been seen ({})".format(bf))
             # Remove the returned files
             os.unlink(bf)
     
     # Assert that all files have been returned by the iterator
     for dummyfile in dummyfiles:
         self.assertFalse(os.path.exists(dummyfile), "Some files were not iterated over ({})".format(dummyfile))
     os.unlink(nomatchfile)
Example #11
    def test_pipeline_with_temp(self):
        input_f = tempfile.mkstemp(suffix='capsul_input.txt')
        os.close(input_f[0])
        input_name = input_f[1]
        open(input_name, 'w').write('this is my input data\n')
        output_f = tempfile.mkstemp(suffix='capsul_output.txt')
        os.close(output_f[0])
        output_name = output_f[1]
        #os.unlink(output_name)

        try:
            self.pipeline.input_image = input_name
            self.pipeline.output_image = output_name

            # run sequentially
            self.pipeline()

            # test
            self.assertTrue(os.path.exists(output_name))
            self.assertEqual(open(input_name).read(), open(output_name).read())

        finally:
            try:
                os.unlink(input_name)
            except: pass
            try:
                os.unlink(output_name)
            except: pass
Example #12
def chunk_scaffolds(target, size):
    chromos = []
    # split target file into `options.size` (~10 Mbp) chunks
    temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
    os.close(temp_fd)
    temp_out_handle = open(temp_out, 'w')
    tb = bx.seq.twobit.TwoBitFile(file(target))
    sequence_length = 0
    tb_key_len = len(tb.keys()) - 1
    print '\nRunning against {}'.format(os.path.basename(target))
    print 'Running with the --huge option.  Chunking files into {0} bp...'.format(size)
    for sequence_count, seq in enumerate(tb.keys()):
        sequence = tb[seq][0:]
        sequence_length += len(sequence)
        # write it to the outfile
        temp_out_handle.write('>{0}\n{1}\n'.format(seq, sequence))
        if sequence_length > size:
            temp_out_handle.close()
            # put tempfile name on stack
            chromos.append(temp_out)
            # open a new temp file
            temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
            os.close(temp_fd)
            temp_out_handle = open(temp_out, 'w')
            # reset sequence length
            sequence_length = 0
        # if we hit the end of the twobit file
        elif sequence_count >= tb_key_len:
            temp_out_handle.close()
            # put tempfile name on stack
            chromos.append(temp_out)
    return chromos
Example #13
    def test_cache(self):
        """Test the caching mechanism in the reporter."""
        length = random.randint(1, 30)
        exit_code = random.randint(0, 3)
        threshold = random.randint(0, 10)

        message = ''.join(random.choice(string.printable) for x in range(length))
        message = message.rstrip()

        (handle, filename) = tempfile.mkstemp()
        os.unlink(filename)
        os.close(handle)
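        # the mkstemp/unlink/close sequence only reserves a fresh filename;
        # NagiosReporter is expected to create the cache file itself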
        reporter = NagiosReporter('test_cache', filename, threshold, self.nagios_user)

        nagios_exit = [NAGIOS_EXIT_OK, NAGIOS_EXIT_WARNING, NAGIOS_EXIT_CRITICAL, NAGIOS_EXIT_UNKNOWN][exit_code]

        reporter.cache(nagios_exit, message)

        (handle, output_filename) = tempfile.mkstemp()
        os.close(handle)

        try:
            old_stdout = sys.stdout
            buffer = StringIO.StringIO()
            sys.stdout = buffer
            reporter_test = NagiosReporter('test_cache', filename, threshold, self.nagios_user)
            reporter_test.report_and_exit()
        except SystemExit, err:
            line = buffer.getvalue().rstrip()
            sys.stdout = old_stdout
            buffer.close()
            self.assertTrue(err.code == nagios_exit[0])
            self.assertTrue(line == "%s %s" % (nagios_exit[1], message))
Example #14
    def test_simple(self):
        outstream = BytesIO()
        errstream = BytesIO()

        # create a file for initial commit
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        filename = os.path.basename(fullpath)
        porcelain.add(repo=self.repo.path, paths=filename)
        porcelain.commit(repo=self.repo.path, message='test',
                         author='test', committer='test')

        # Setup target repo
        target_path = tempfile.mkdtemp()
        porcelain.clone(self.repo.path, target=target_path, outstream=outstream)

        # create a second file to be pushed
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        filename = os.path.basename(fullpath)
        porcelain.add(repo=self.repo.path, paths=filename)
        porcelain.commit(repo=self.repo.path, message='test2',
            author='test2', committer='test2')

        # Pull changes into the cloned repo
        porcelain.pull(target_path, self.repo.path, 'refs/heads/master',
            outstream=outstream, errstream=errstream)

        # Check the target repo for pushed changes
        r = Repo(target_path)
        self.assertEqual(r['HEAD'].id, self.repo['HEAD'].id)
Example #15
def run_shell_cmd(cmd, echo=True):
    """
    Run a command in a sub-shell, capturing stdout and stderr
    to temporary files that are then read.
    """
    _, stdout_f = tempfile.mkstemp()
    _, stderr_f = tempfile.mkstemp()

    print("Running command")
    print(cmd)
    p = subprocess.Popen(
        '{} >{} 2>{}'.format(cmd, stdout_f, stderr_f), shell=True)
    p.wait()

    with open(stdout_f) as f:
        stdout = f.read()
    os.remove(stdout_f)

    with open(stderr_f) as f:
        stderr = f.read()
    os.remove(stderr_f)

    if echo:
        print("stdout:")
        print(stdout)
        print("stderr:")
        print(stderr)

    return stdout, stderr
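A minimal usage sketch for run_shell_cmd above (the command itself is illustrative):

stdout, stderr = run_shell_cmd("echo hello", echo=False)
assert stdout.strip() == "hello"
assert stderr == ""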
Example #16
 def setUp(self):
     # Changes CONFIGDEFAULT
     super(TestConfig, self).setUp()
     osfd, filename = tempfile.mkstemp(suffix='.ini',
                                       dir=self.config.CONFIGDEFAULT)
     os.close(osfd)
     self.config.DEFAULTSFILE = filename
     self.deffile = open(filename, 'wb')
     # ConfigSection will open again
     self.deffile.close()
     # ConfigDict forbids writing
     foo = self.config.ConfigSection(None, 'DEFAULTS')
     # Verify these all are converted to lower-case automatically
     foo.set('tEsTOPTioNi', 2)  # non-string values should also convert
     foo.set('TesToPTIONf', 3.14)
     foo.set('testoptionS', "foobarbaz")
     foo.write(self.deffile)
     # Set up separate test config file
     osfd, filename = tempfile.mkstemp(suffix='.ini',
                                       dir=self.config.CONFIGDEFAULT)
     os.close(osfd)
     # in case a test needs it
     self.cfgfile = open(filename, 'wb')
     # ConfigSection will open again
     self.cfgfile.close()
     # ConfigDict forbids writing
     bar = self.config.ConfigSection(None, 'TestSection')
     bar.set('TestOptionB', False)
     bar.set('TesTopTIONs', "baz!")
     bar.set("testoptionx", "True")  # should convert to boolean
     bar.write(self.cfgfile)
Example #17
    def testParseMaincfg(self):
        """ Test parsing of different broker_module declarations """
        path = "/var/lib/nagios/rw/livestatus"  # Path to the livestatus socket

        # Test plain setup with no weird arguments
        fd, filename = tempfile.mkstemp()
        os.write(fd, 'broker_module=./livestatus.o /var/lib/nagios/rw/livestatus')
        status = pynag.Parsers.mk_livestatus(nagios_cfg_file=filename)
        self.assertEqual(path, status.livestatus_socket_path)
        os.close(fd)

        # Test what happens if arguments are provided
        fd, filename = tempfile.mkstemp()
        os.write(fd, 'broker_module=./livestatus.o /var/lib/nagios/rw/livestatus hostgroups=t')
        status = pynag.Parsers.mk_livestatus(nagios_cfg_file=filename)
        self.assertEqual(path, status.livestatus_socket_path)
        os.close(fd)

        # Test what happens if arguments are provided before and after file socket path
        fd, filename = tempfile.mkstemp()
        os.write(fd, 'broker_module=./livestatus.o  num_client_threads=20 /var/lib/nagios/rw/livestatus hostgroups=t')
        status = pynag.Parsers.mk_livestatus(nagios_cfg_file=filename)
        self.assertEqual(path, status.livestatus_socket_path)
        os.close(fd)

        # Test what happens if livestatus socket path cannot be found
        try:
            fd, filename = tempfile.mkstemp()
            os.write(fd, 'broker_module=./livestatus.o  num_client_threads=20')
            status = pynag.Parsers.mk_livestatus(nagios_cfg_file=filename)
            self.assertEqual(path, status.livestatus_socket_path)
            os.close(fd)
            self.fail("Above code should have raised an exception")
        except pynag.Parsers.ParserError:
            pass
Example #18
def translate(args):
	if args.update_translation:
		#from website
		_,fname = mkstemp(suffix=".pot")
		os.system("pybabel extract -F website/babel.cfg -o %s website/" % fname)
		os.system("pybabel update -D messages -i %s -d translations/" % fname)
		os.remove(fname)

		#from documentation
		from website.utils import Documentation
		fhandle,fname = mkstemp(suffix=".pot")
		with os.fdopen(fhandle,"w") as fid:
			docs = Documentation(os.path.join(args.repo,'website','docs','sources'))
			docs.extract_messages(fid)
		os.system("pybabel update -D docs -d translations -i %s" % fname)
		os.remove(fname)

		#from parts data
		repo = Repository(args.repo)
		from backends.translations import TranslationBackend
		fhandle,fname = mkstemp(suffix=".pot")
		TranslationBackend(repo,[]).write_output(fname)

		os.system("pybabel update -D parts -d translations -i %s" % fname)
		os.remove(fname)

	if args.compile_translation:
		os.system("pybabel compile -D messages -d translations/")
		os.system("pybabel compile -D parts -d translations/")
		os.system("pybabel compile -D docs -d translations/")
Example #19
    def prepare_files(self):
        if urlparse.urlsplit(self.inurl)[0] == 'file':
            self.infname = urllib.url2pathname(urlparse.urlsplit(self.inurl)[2])
            self.infd = open(self.infname)
        else:
            # not a file url. download it.
            source = urllib.urlopen(self.inurl)
            self.infd, self.infname = tempfile.mkstemp(prefix="transcode-in-",
                suffix="." + self.inext)
            self._files_to_clean_up_on_success.append((self.infd, self.infname))
            self._files_to_clean_up_on_error.append((self.infd, self.infname))
            while True:
                chunk = source.read(1024 * 64)
                if not chunk:
                    break
                os.write(self.infd, chunk)
            os.lseek(self.infd, 0, 0)

        self.outfd, self.outfname = tempfile.mkstemp(prefix="transcode-out-",
            suffix="." + self.tofmt)
        self._files_to_clean_up_on_error.append((self.outfd, self.outfname))

        self.errfh, self.errfname = tempfile.mkstemp(prefix="transcode-",
            suffix=".log")
        self.outurl = urlparse.urlunsplit(
            ["file", None, self.outfname, None, None])
        self._files_to_clean_up_on_success.append((self.errfh, self.errfname))
        log.debug("Reading from " + self.infname + " (" + self.inurl + ")")
        log.debug("Outputting to " + self.outfname + " (" + self.outurl + ")")
        log.debug("Errors to " + self.errfname)
Example #20
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        self.rest_service_log = tempfile.mkstemp()[1]
        self.securest_log_file = tempfile.mkstemp()[1]
        self.file_server = FileServer(self.tmpdir)
        self.addCleanup(self.cleanup)
        self.file_server.start()
        storage_manager.storage_manager_module_name = \
            STORAGE_MANAGER_MODULE_NAME

        # workaround for setting the rest service log path, since it's
        # needed when 'server' module is imported.
        # right after the import the log path is set normally like the rest
        # of the variables (used in the reset_state)
        tmp_conf_file = tempfile.mkstemp()[1]
        json.dump({'rest_service_log_path': self.rest_service_log,
                   'rest_service_log_file_size_MB': 1,
                   'rest_service_log_files_backup_count': 1,
                   'rest_service_log_level': 'DEBUG'},
                  open(tmp_conf_file, 'w'))
        os.environ['MANAGER_REST_CONFIG_PATH'] = tmp_conf_file
        try:
            from manager_rest import server
        finally:
            del(os.environ['MANAGER_REST_CONFIG_PATH'])

        server.reset_state(self.create_configuration())
        utils.copy_resources(config.instance().file_server_root)
        server.setup_app()
        server.app.config['Testing'] = True
        self.app = server.app.test_client()
        self.client = self.create_client()
        self.initialize_provider_context()
Example #21
def add_lang(args):
	#from website
	_,fname = mkstemp(suffix=".pot")
	os.system("pybabel extract -F website/babel.cfg -o %s website/" % fname)
	os.system("pybabel init -D messages -d translations/ -i %s -l %s" % (fname,args.lang))
	os.remove(fname)

	#from documentation
	from website.utils import Documentation
	fhandle,fname = mkstemp(suffix=".pot")
	with os.fdopen(fhandle,"w") as fid:
		docs = Documentation(os.path.join(args.repo,'website','docs','sources'))
		docs.extract_messages(fid)
	os.system("pybabel init -D docs -d translations/ -i %s -l %s" % (fname,args.lang))
	os.remove(fname)

	#from parts data
	repo = Repository(args.repo)
	from backends.translations import TranslationBackend
	fhandle,fname = mkstemp(suffix=".pot")
	TranslationBackend(repo,[]).write_output(fname)

	os.system("pybabel init -D parts -d translations/ -i %s -l %s" % (fname,args.lang))
	os.remove(fname)
	print("Don't forget to edit website/templates/base.html and add the language to the dropdown menu")
Example #22
def process_delta(data, delta):
    if not delta.specific.delta:
        return data
    if delta.specific.delta == 'cat':
        datalines = data.split('\n')
        for line in delta.data.split('\n'):
            if not line:
                continue
            if line[0] == '+':
                datalines.append(line[1:])
            elif line[0] == '-':
                if line[1:] in datalines:
                    datalines.remove(line[1:])
        return "\n".join(datalines)
    elif delta.specific.delta == 'diff':
        basehandle, basename = tempfile.mkstemp()
        basefile = open(basename, 'w')
        basefile.write(data)
        basefile.close()
        os.close(basehandle)
        dhandle, dname = tempfile.mkstemp()
        dfile = open(dname, 'w')
        dfile.write(delta.data)
        dfile.close()
        os.close(dhandle)
        ret = os.system("patch -uf %s < %s > /dev/null 2>&1" \
                        % (basefile.name, dfile.name))
        output = open(basefile.name, 'r').read()
        [os.unlink(fname) for fname in [basefile.name, dfile.name]]
        if ret >> 8 != 0:
            raise Bcfg2.Server.Plugin.PluginExecutionError, ('delta', delta)
        return output
Example #23
def cmpIgProfReport(outdir,file1,file2,IgProfMemOpt=""):
    (tfile1, tfile2) = ("", "")
    try:
        # don't make temp files in /tmp because it's never bloody big enough
        (th1, tfile1) = tmp.mkstemp(prefix=os.path.join(outdir,"igprofRegressRep."))
        (th2, tfile2) = tmp.mkstemp(prefix=os.path.join(outdir,"igprofRegressRep."))
        os.close(th1)
        os.close(th2)
        os.remove(tfile1)
        os.remove(tfile2)
        ungzip2(file1,tfile1)
        ungzip2(file2,tfile2)        

        perfreport(1,tfile1,tfile2,outdir,IgProfMemOpt)

        os.remove(tfile1)
        os.remove(tfile2)
    except OSError as detail:
        # clean up the temp files *before* raising; code after a raise never runs
        if os.path.exists(tfile1):
            os.remove(tfile1)
        if os.path.exists(tfile2):
            os.remove(tfile2)
        raise PerfReportErr("WARNING: The OS returned the following error when comparing %s and %s\n%s" % (file1,file2,str(detail)))
    except IOError as detail:
        if os.path.exists(tfile1):
            os.remove(tfile1)
        if os.path.exists(tfile2):
            os.remove(tfile2)
        raise PerfReportErr("IOError: When comparing %s and %s using temporary files %s and %s. Error message:\n%s" % (file1,file2,tfile1,tfile2,str(detail)))
Example #24
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname:
                os.unlink(inname)
        except:
            pass
        try:
            if outname:
                os.unlink(outname)
        except:
            pass
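A hypothetical call of tempfilter above, assuming a Unix tr(1) binary is on the PATH; INFILE and OUTFILE in the template are replaced with the generated temporary names:

upper = tempfilter('hello\n', 'tr a-z A-Z < INFILE > OUTFILE')
assert upper == 'HELLO\n'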
Example #25
def svg_formatter(data, format) : 
    """ Generate a logo in Scalable Vector Graphics (SVG) format.
    Requires the program 'pdf2svg' be installed.
    """
    pdf = pdf_formatter(data, format)
    
    try:
        command = find_command('pdf2svg')
    except EnvironmentError:
        raise EnvironmentError("Scalable Vector Graphics (SVG) format requires the program 'pdf2svg'. "
                               "Cannot find 'pdf2svg' on search path.")

    import tempfile
    fpdfi, fname_pdf = tempfile.mkstemp(suffix=".pdf")
    fsvgi, fname_svg = tempfile.mkstemp(suffix=".svg")
    # close the OS-level descriptors from mkstemp; both files are reopened by name
    os.close(fpdfi)
    os.close(fsvgi)
    try:
        # binary mode works for both Python 2 (str) and Python 3 (bytes)
        with open(fname_pdf, 'wb') as fpdf2:
            fpdf2.write(pdf)
  
        args = [command, fname_pdf, fname_svg]
        p = Popen(args)
        (out,err) = p.communicate() 

        with open(fname_svg) as fsvg:
            return fsvg.read().encode()
    finally:
        os.remove(fname_svg)
        os.remove(fname_pdf)
Example #26
File: api.py Project: ameade/nova
def _inject_admin_password_into_fs(admin_passwd, fs):
    """Set the root password to admin_passwd

    admin_password is a root password
    fs is the path to the base of the filesystem into which to inject
    the key.

    This method modifies the instance filesystem directly,
    and does not require a guest agent running in the instance.

    """
    # The approach used here is to copy the password and shadow
    # files from the instance filesystem to local files, make any
    # necessary changes, and then copy them back.

    LOG.debug(_("Inject admin password fs=%(fs)s "
                "admin_passwd=ha-ha-not-telling-you") %
              locals())
    admin_user = '******'

    fd, tmp_passwd = tempfile.mkstemp()
    os.close(fd)
    fd, tmp_shadow = tempfile.mkstemp()
    os.close(fd)

    passwd_path = os.path.join('etc', 'passwd')
    shadow_path = os.path.join('etc', 'shadow')

    passwd_data = fs.read_file(passwd_path)
    shadow_data = fs.read_file(shadow_path)

    new_shadow_data = _set_passwd(admin_user, admin_passwd,
                                  passwd_data, shadow_data)

    fs.replace_file(shadow_path, new_shadow_data)
Example #27
def dump_to_file(blurb, exit_fpr):
    """
    Dump the given blurb to a randomly generated file which contains exit_fpr.

    This function is useful to save data obtained from bad exit relays to file
    for later analysis.
    """
    if analysis_dir is None:
        fd, file_name = tempfile.mkstemp(prefix="%s_" % exit_fpr)

    else:
        try:
            os.makedirs(analysis_dir)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        fd, file_name = tempfile.mkstemp(prefix="%s_" % exit_fpr,
                                         dir=analysis_dir)

    # close the descriptor mkstemp() opened; the file is reopened by name below
    os.close(fd)

    try:
        with open(file_name, "w") as fobj:
            fobj.write(blurb)
    except IOError as err:
        log.warning("Couldn't write to \"%s\": %s" % (file_name, err))
        return None

    log.debug("Wrote %d-length blurb to file \"%s\"." %
                 (len(blurb), file_name))

    return file_name
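A hypothetical call of dump_to_file above; the fingerprint is only used as a filename prefix, and None signals that the write failed:

path = dump_to_file("suspicious response body", "0123456789ABCDEF0123456789ABCDEF01234567")
if path is not None:
    print("blurb saved to %s" % path)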
Example #28
def shellScriptInWindow(c,script):

    if sys.platform == 'darwin':
        #@        << write script to temporary MacOS file >>
        #@+node:ekr.20040915105758.22:<< write script to temporary MacOS file >>
        handle, path = tempfile.mkstemp(text=True)
        directory = c.frame.openDirectory
        script = ("cd %s\n" % directory) + script + '\n' + ("rm -f %s\n" % path)
        os.write(handle, script)
        os.close(handle)
        os.chmod(path, 0700)
        #@nonl
        #@-node:ekr.20040915105758.22:<< write script to temporary MacOS file >>
        #@nl
        os.system("open -a /Applications/Utilities/Terminal.app " + path)

    elif sys.platform == 'win32':
        g.es("shellScriptInWindow not ready for Windows",color='red')

    else:
        #@        << write script to temporary Unix file >>
        #@+node:ekr.20040915105758.25:<< write script to temporary Unix file >>
        handle, path = tempfile.mkstemp(text=True)
        directory = c.frame.openDirectory
        script = ("cd %s\n" % directory) + script + '\n' + ("rm -f %s\n" % path)
        os.write(handle, script)
        os.close(handle)
        os.chmod(path, 0700)
        #@nonl
        #@-node:ekr.20040915105758.25:<< write script to temporary Unix file >>
        #@nl
        os.system("xterm -e sh  " + path)
Example #29
    def change_admin_password(self, password):
        root_logger.debug("Changing admin password")
        dirname = config_dirname(self.serverid)
        dmpwdfile = ""
        admpwdfile = ""

        try:
            (dmpwdfd, dmpwdfile) = tempfile.mkstemp(dir='/var/lib/ipa')
            os.write(dmpwdfd, self.dm_password)
            os.close(dmpwdfd)

            (admpwdfd, admpwdfile) = tempfile.mkstemp(dir='/var/lib/ipa')
            os.write(admpwdfd, password)
            os.close(admpwdfd)

            args = ["/usr/bin/ldappasswd", "-h", self.fqdn,
                    "-ZZ", "-x", "-D", str(DN(('cn', 'Directory Manager'))),
                    "-y", dmpwdfile, "-T", admpwdfile,
                    str(DN(('uid', 'admin'), ('cn', 'users'), ('cn', 'accounts'), self.suffix))]
            try:
                env = { 'LDAPTLS_CACERTDIR':os.path.dirname(CACERT),
                        'LDAPTLS_CACERT':CACERT }
                ipautil.run(args, env=env)
                root_logger.debug("ldappasswd done")
            except ipautil.CalledProcessError, e:
                print "Unable to set admin password", e
                root_logger.debug("Unable to set admin password %s" % e)

        finally:
            if os.path.isfile(dmpwdfile):
                os.remove(dmpwdfile)
            if os.path.isfile(admpwdfile):
                os.remove(admpwdfile)
Example #30
def wnominate(state, session, chamber, polarity, r_bin="R",
              out_file=None):
    (fd, filename) = tempfile.mkstemp('.csv')
    with os.fdopen(fd, 'w') as out:
        vote_csv(state, session, chamber, out)

    if not out_file:
        (result_fd, out_file) = tempfile.mkstemp('.csv')
        os.close(result_fd)

    r_src_path = os.path.join(os.path.dirname(__file__), 'calc_wnominate.R')

    with open('/dev/null', 'w') as devnull:
        subprocess.check_call([r_bin, "-f", r_src_path, "--args",
                               filename, out_file, polarity],
                              stdout=devnull, stderr=devnull)

    results = {}
    with open(out_file) as f:
        c = csv.DictReader(f)
        for row in c:
            try:
                res = float(row['coord1D'])
            except ValueError:
                res = None
            results[row['leg_id']] = res

    os.remove(filename)

    return results
Example #31
    def _persist(self, state, checkpoint: Checkpoint):
        dup = [
            c for c in self._checkpoints
            if c.global_step == checkpoint.global_step
        ]

        assert len(dup) <= 1

        dup = dup[0] if dup else None
        if dup is not None:
            if dup.score is not None and checkpoint.score is None:
                checkpoint.score = dup.score

        # save checkpoint
        target = Path(self._save_dir, checkpoint.filename)
        if not target.parent.exists():
            os.makedirs(str(target.parent), exist_ok=True)

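        # write to a temp file in the save dir and then move it into place;
        # on the same filesystem the move is an atomic rename, so readers
        # never see a partially written checkpoint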
        fd, name = tempfile.mkstemp(dir=self._save_dir)
        torch.save(state, name)
        os.close(fd)
        shutil.move(name, target)
        self._checkpoints.append(checkpoint)

        # remove checkpoints
        if dup is not None:
            self._checkpoints.remove(dup)
        discard = []

        evals = sorted([c for c in self._checkpoints if c.score is not None],
                       key=lambda c: c.score)
        periods = sorted([
            c
            for c in self._checkpoints if c.is_periodic and not c.end_of_epoch
        ],
                         key=lambda c: c.global_step)
        epochs = sorted(
            [c for c in self._checkpoints if c.is_periodic and c.end_of_epoch],
            key=lambda c: c.global_step)

        retains = set(evals[-self._keep_best_checkpoint_max:] +
                      periods[-self._keep_checkpoint_max:] +
                      epochs[-self._keep_epoch_checkpoint_max:])
        retains = sorted(retains, key=lambda c: c.global_step)

        discard += [c for c in self._checkpoints if c not in retains]
        discard = list(set(discard))

        for c in discard:
            path = Path(self._save_dir, c.filename)
            if path.exists():
                path.unlink()

        self._checkpoints = retains
        # save meta statistics
        with Path(self._save_dir, _META_CHECKPOINT_PATH).open('w') as w:
            w.write(
                json.dumps([c.__dict__ for c in retains],
                           indent=4,
                           sort_keys=True))
        return True
Example #32
 def setUp(self):
     self.cnt += 1
     h, path = tempfile.mkstemp('.ini')
     assert QgsSettings.setGlobalSettingsPath(path)
     self.settings = QgsSettings('testqgissettings', 'testqgissettings%s' % self.cnt)
     self.globalsettings = QSettings(self.settings.globalSettingsPath(), QSettings.IniFormat)
Example #33
 def __init__(self, logging_group, scratch_dir):
     super(FaultyParser, self).__init__(logging_group)
     _, self.fake_thumb = tempfile.mkstemp(suffix=".png", dir=scratch_dir)
Example #34
 def __init__(self, logging_group, scratch_dir, archive_path):
     super(DummyParser, self).__init__(logging_group)
     _, self.fake_thumb = tempfile.mkstemp(suffix=".png", dir=scratch_dir)
     self.archive_path = archive_path
Example #35
# scatter work units to nodes
unit = comm.scatter(work, root=0)
# ===================================
# This should be node-related work
# ===================================

# open the file on a node
f = MPI.File.Open(comm, unit, mode)
# create a buffer for the data of size f.Get_size()
ba = bytearray(f.Get_size())
# read the contents into a byte array (blocking read, so the buffer is complete)
f.Read(ba)
# close the file
f.Close()
# write buffer to a tempfile
descriptor, path = tempfile.mkstemp(suffix='mpi.txt')
print(path)
tf = os.fdopen(descriptor, 'wb')  # binary mode, since ba is a bytearray
tf.write(ba)
tf.close()
# get contents of tempfile
contents = open(path, 'rU').read() + str(comm.Get_rank())
os.remove(path)

# ===================================
# End of node-related work
# ===================================

# gather results
result = comm.gather(contents, root=0)
# do something with result
Example #36
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = 'bbcd332a78c7149814e99b5e6a93310f5c00f016'

import qgis  # NOQA

import tempfile
import os

(myFileHandle, myFilename) = tempfile.mkstemp()
os.environ['QGIS_DEBUG'] = '2'
os.environ['QGIS_LOG_FILE'] = myFilename

from qgis.core import QgsLogger
from qgis.testing import unittest

# Convenience instances in case you may need them
# not used in this test
# from qgis.testing import start_app
# start_app()


class TestQgsLogger(unittest.TestCase):

    def testLogger(self):
Example #37
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################

from __future__ import print_function, division
import numpy as np
import re, os, tempfile
from mdtraj.formats.pdb import pdbstructure
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.testing import get_fn, eq, raises, assert_warns
from mdtraj import load, load_pdb
from mdtraj.utils import ilen
from mdtraj import Topology

pdb = get_fn('native.pdb')
fd, temp = tempfile.mkstemp(suffix='.pdb')
os.close(fd)


def teardown_module(module):
    """remove the temporary file created by tests in this file
    this gets automatically called by nose"""
    os.unlink(temp)


def test_pdbread():
    p = load(pdb)


def test_pdbwrite():
    p = load(pdb)
Example #38
def temp_file(ext='tmp'):
    """Get a temporary filename with the given extension. This function
    will actually attempt to create the file."""
    tf, fn = tempfile.mkstemp(suffix='.%s' % ext)
    os.close(tf)
    return fn
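A minimal usage sketch for temp_file above: the file already exists on disk when the name is returned, and the caller is responsible for removing it (os is assumed to be imported, as in the example):

fn = temp_file('csv')
try:
    with open(fn, 'w') as f:
        f.write('a,b\n1,2\n')
finally:
    os.unlink(fn)  # temp_file does not clean up after itself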
Example #39
def tmpfile_object():
	fdesc, path = tempfile.mkstemp()
	return os.fdopen(fdesc, 'wb'), path
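A hypothetical usage of tmpfile_object above; the caller owns both the handle and the path:

fobj, path = tmpfile_object()
try:
    fobj.write(b'payload')  # the handle was opened in binary mode ('wb')
finally:
    fobj.close()
    os.unlink(path)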
Example #40
"""
Tests for the config module
"""

import os
from tempfile import mkstemp

from onedrive_client.utils.config import config_load
from onedrive_client.utils.errors import OneDriveConfigException
import pytest

_BAD_FILE_PATH = mkstemp()[1]
_FILE_PATH = mkstemp()[1]
_BAD_CONTENT = ':'
_CONTENT = 'parameter: test'


# pylint: disable=no-self-use
class TestConfigLoad(object):
    """
    Test class for the load functionality
    """

    def setup_class(self):
        """
        Write two temporary files for the tests
        """

        with open(_BAD_FILE_PATH, 'w') as tmpfile:
            tmpfile.write(_BAD_CONTENT)
Example #41
    os.chdir(execution_path('.'))

def teardown():
    mapnik.logger.set_severity(default_logging_severity)

def compare_map(xml):
    m = mapnik.Map(256, 256)
    absolute_base = os.path.abspath(os.path.dirname(xml))
    try:
        mapnik.load_map(m, xml, False, absolute_base)
    except RuntimeError, e:
        # only test datasources that we have installed
        if not 'Could not create datasource' in str(e):
            raise RuntimeError(str(e))
        return
    (handle, test_map) = tempfile.mkstemp(suffix='.xml', prefix='mapnik-temp-map1-')
    os.close(handle)
    (handle, test_map2) = tempfile.mkstemp(suffix='.xml', prefix='mapnik-temp-map2-')
    os.close(handle)
    if os.path.exists(test_map):
        os.remove(test_map)
    mapnik.save_map(m, test_map)
    new_map = mapnik.Map(256, 256)
    mapnik.load_map(new_map, test_map,False,absolute_base)
    open(test_map2,'w').write(mapnik.save_map_to_string(new_map))
    diff = ' diff -u %s %s' % (os.path.abspath(test_map),os.path.abspath(test_map2))
    try:
        eq_(open(test_map).read(),open(test_map2).read())
    except AssertionError, e:
        raise AssertionError('serialized map "%s" not the same after being reloaded, \ncompare with command:\n\n$%s' % (xml,diff))
Example #42
def main(arguments = None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args = arguments)

    # Adjust the logger level to WARNING, INFO or DEBUG, depending on the
    # given number of -v options (none, one or two or more, respectively)
    logging_level = logging.WARNING
    if options.verbose == 1:
        logging_level = logging.INFO
    elif options.verbose >= 2:
        logging_level = logging.DEBUG
    logging.basicConfig(format = style.LOG_FORMAT, level = logging_level)

    # Print the help and abort the execution if there are not two positional
    # arguments left after parsing the options, as the user must specify at
    # least one (only one?) input FITS file and the output JSON file.
    if len(args) < 2:
        parser.print_help()
        return 2     # 2 is generally used for command line syntax errors
    else:
        sources_img_path = args[0]
        input_paths = list(set(args[1:-1]))
        output_json_path = args[-1]

    # The execution of this module, especially when doing long-term monitoring
    # of reasonably crowded fields, may easily take several *days*. The least
    # we can do, in order to spare the end-user from insufferable grief because
    # of the waste of billions of valuable CPU cycles, is to avoid having the
    # output file accidentally overwritten.

    if os.path.exists(output_json_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_json_path)
            print style.error_exit_message
            return 1

    msg = "%sExamining the headers of the %s FITS files given as input..."
    print msg % (style.prefix, len(input_paths))

    files = fitsimage.InputFITSFiles()
    for index, img_path in enumerate(input_paths):
        img = fitsimage.FITSImage(img_path)
        pfilter = img.pfilter(options.filterk)
        files[pfilter].append(img)

        percentage = (index + 1) / len(input_paths) * 100
        methods.show_progress(percentage)

    print # progress bar doesn't include newline
    print style.prefix

    # To begin with, we need to identify the most constant stars, something for
    # which we have to do photometry on all the stars and for all the images of
    # the campaign. But fret not, as this has to be done only this time: once
    # we get the light curves of all the stars and for all the images, we will
    # be able to determine which are the most constant among them and work
    # always with this subset in order to determine which aperture and sky
    # annulus are the optimal.

    msg = "%sDoing initial photometry with FWHM-derived apertures..."
    print msg % style.prefix
    print style.prefix

    # mkstemp() returns a tuple containing an OS-level handle to an open file
    # and its absolute pathname. Thus, we need to close the file right after
    # creating it, and tell the photometry module to overwrite (-w) it.

    kwargs = dict(prefix = 'photometry_', suffix = '.LEMONdB')
    phot_db_handle, phot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(methods.clean_tmp_files, phot_db_path)
    os.close(phot_db_handle)

    basic_args = [sources_img_path] + input_paths + \
                 [phot_db_path, '--overwrite']

    phot_args = ['--maximum', options.maximum,
                 '--margin', options.margin,
                 '--cores', options.ncores,
                 '--min-sky', options.min,
                 '--objectk', options.objectk,
                 '--filterk', options.filterk,
                 '--datek', options.datek,
                 '--timek', options.timek,
                 '--expk', options.exptimek,
                 '--coaddk', options.coaddk,
                 '--gaink', options.gaink,
                 '--fwhmk', options.fwhmk,
                 '--airmk', options.airmassk]

    # The --gain and --uik options default to None, so add them to the list of
    # arguments only if they were given. Otherwise, (a) --gaink would be given
    # a value of 'None', a string, that would result in an error when optparse
    # attempted to convert it to float, and (b) --uik would understand 'None'
    # as the name of the keyword storing the path to the uncalibrated image.

    if options.gain:
        phot_args += ['--gain', options.gain]

    if options.uncimgk:
        phot_args += ['--uncimgk', options.uncimgk]

    # Pass as many '-v' options as we have received here
    [phot_args.append('-v') for x in xrange(options.verbose)]

    extra_args = ['--aperture', options.aperture,
                  '--annulus', options.annulus,
                  '--dannulus', options.dannulus]

    # Non-zero return codes raise subprocess.CalledProcessError
    args = basic_args + phot_args + extra_args
    check_run(photometry.main, [str(a) for a in args])

    # Now we need to compute the light curves and find those that are most
    # constant. This, of course, has to be done for each filter, as a star
    # identified as constant in Johnson I may be too faint in Johnson B, for
    # example. In other words: we need to calculate the light curve of each
    # star and for each filter, and then determine which are the
    # options.nconstant stars with the lowest standard deviation.

    print style.prefix
    msg = "%sGenerating light curves for initial photometry."
    print msg % style.prefix
    print style.prefix

    kwargs = dict(prefix = 'diffphot_', suffix = '.LEMONdB')
    diffphot_db_handle, diffphot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(methods.clean_tmp_files, diffphot_db_path)
    os.close(diffphot_db_handle)

    diff_args = [phot_db_path,
                 '--output', diffphot_db_path, '--overwrite',
                 '--cores', options.ncores,
                 '--minimum-images', options.min_images,
                 '--stars', options.nconstant,
                 '--minimum-stars', options.min_cstars,
                 '--pct', options.pct,
                 '--weights-threshold', options.wminimum,
                 '--max-iters', options.max_iters,
                 '--worst-fraction', options.worst_fraction]

    [diff_args.append('-v') for x in xrange(options.verbose)]

    check_run(diffphot.main, [str(a) for a in diff_args])
    print style.prefix

    # Map each photometric filter to the path of the temporary file where the
    # right ascension and declination of each constant star, one per line, will
    # be saved. This file is from now on passed, along with the --coordinates
    # option, to photometry.main(), so that photometry is not done on all the
    # astronomical objects, but instead exclusively on these ones.

    coordinates_files = {}

    miner = mining.LEMONdBMiner(diffphot_db_path)
    for pfilter in miner.pfilters:

        # LEMONdBMiner.sort_by_curve() returns a list of two-element tuples,
        # mapping the ID of each star to the standard deviation of its light
        # curve in this photometric filter. The list is sorted in increasing
        # order by the standard deviation. We are only interested in the first
        # 'options.nconstant', needing at least 'options.pminimum'.

        msg = "%sIdentifying the %d most constant stars for the %s filter..."
        args = style.prefix, options.nconstant, pfilter
        print msg % args ,
        sys.stdout.flush()

        kwargs = dict(minimum = options.min_images)
        stars_stdevs = miner.sort_by_curve_stdev(pfilter, **kwargs)
        cstars = stars_stdevs[:options.nconstant]

        if len(cstars) < options.pminimum:
            msg = ("fewer than %d stars identified as constant in the "
                   "initial photometry for the %s filter")
            args = options.pminimum, pfilter
            raise NotEnoughConstantStars(msg % args)
        else:
            print 'done.'

        if len(cstars) < options.nconstant:
            msg = "%sBut only %d stars were available. Using them all, anyway."
            print msg % (style.prefix, len(cstars))

        # Replacing whitespaces with underscores is easier than having to quote
        # the path to the --coordinates file if the name of the filter contains
        # them (otherwise, optparse would only see up to the first whitespace).
        prefix = '%s_' % str(pfilter).replace(' ', '_')
        kwargs = dict(prefix = prefix, suffix = '.coordinates')
        coords_fd, coordinates_files[pfilter] = tempfile.mkstemp(**kwargs)
        atexit.register(methods.clean_tmp_files, coordinates_files[pfilter])

        # LEMONdBMiner.get_star() returns a five-element tuple with the x and y
        # coordinates, right ascension, declination and instrumental magnitude
        # of the astronomical object in the sources image.
        for star_id, _ in cstars:
            ra, dec = miner.get_star(star_id)[2:4]
            os.write(coords_fd, "%.10f\t%.10f\n" % (ra, dec))
        os.close(coords_fd)

        msg = "%sStar coordinates for %s temporarily saved to %s"
        print msg % (style.prefix, pfilter, coordinates_files[pfilter])

    # The constant astronomical objects, the only ones to which we will pay
    # attention from now on, have been identified. So far, so good. Now we
    # generate the light curves of these objects for each candidate set of
    # photometric parameters. We store the evaluated values in a dictionary in
    # which each filter maps to a list of json_parse.CandidateAnnuli objects.

    evaluated_annuli = collections.defaultdict(list)

    for pfilter, coords_path in coordinates_files.iteritems():

        print style.prefix
        msg = "%sFinding the optimal photometric parameters for the %s filter."
        print msg % (style.prefix, pfilter)

        if len(files[pfilter]) < options.min_images:
            msg = "fewer than %d images (--minimum-images option) for %s"
            args = options.min_images, pfilter
            raise NotEnoughConstantStars(msg % args)

        # The median FWHM of the images is needed in order to calculate the
        # range of apertures that we need to evaluate for this filter.

        msg = "%sCalculating the median FWHM for this filter..."
        print msg % style.prefix ,

        pfilter_fwhms = []
        for img in files[pfilter]:
            img_fwhm = photometry.get_fwhm(img, options)
            logging.debug("%s: FWHM = %.3f" % (img.path, img_fwhm))
            pfilter_fwhms.append(img_fwhm)

        fwhm = numpy.median(pfilter_fwhms)
        print ' done.'

        # FWHM to range of pixels conversion
        min_aperture = fwhm * options.lower
        max_aperture = fwhm * options.upper
        annulus      = fwhm * options.sky
        dannulus     = fwhm * options.width

        # The dimensions of the sky annulus remain fixed, while the
        # aperture is in the range [lower * FWHM, upper FWHM], with
        # increments of options.step pixels.
        filter_apertures = numpy.arange(min_aperture, max_aperture, options.step)
        assert filter_apertures[0] == min_aperture

        msg = "%sFWHM (%s passband) = %.3f pixels, therefore:"
        print msg % (style.prefix, pfilter, fwhm)
        msg = "%sAperture radius, minimum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.lower, min_aperture)
        msg = "%sAperture radius, maximum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.upper, max_aperture)
        msg = "%sAperture radius, step = %.2f pixels, which means that:"
        print msg % (style.prefix, options.step)

        msg = "%sAperture radius, actual maximum = %.3f + %d x %.2f = %.3f pixels"
        args = (style.prefix, min_aperture, len(filter_apertures),
                options.step, max(filter_apertures))
        print msg % args

        msg = "%sSky annulus, inner radius = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.sky, annulus)
        msg = "%sSky annulus, width = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.width, dannulus)

        msg = "%s%d different apertures in the range [%.2f, %.2f] to be evaluated:"
        args = (style.prefix, len(filter_apertures),
                filter_apertures[0], filter_apertures[-1])
        print msg % args

        # For each candidate aperture, and only with the images taken in
        # this filter, do photometry on the constant stars and compute the
        # median of the standard deviation of their light curves as a means
        # of evaluating the suitability of this combination of parameters.
        for index, aperture in enumerate(filter_apertures):

            print style.prefix

            kwargs = dict(prefix = 'photometry_', suffix = '.LEMONdB')
            fd, aper_phot_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(methods.clean_tmp_files, aper_phot_db_path)
            os.close(fd)

            paths = [img.path for img in files[pfilter]]
            basic_args = [sources_img_path] + paths + \
                         [aper_phot_db_path, '--overwrite']

            extra_args = ['--filter', str(pfilter),
                          '--coordinates', coords_path,
                          '--aperture-pix', aperture,
                          '--annulus-pix', annulus,
                          '--dannulus-pix', dannulus]

            args = basic_args + phot_args + extra_args
            check_run(photometry.main, [str(a) for a in args])

            kwargs = dict(prefix = 'diffphot_', suffix = '.LEMONdB')
            fd, aper_diff_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(methods.clean_tmp_files, aper_diff_db_path)
            os.close(fd)

            # Reuse the arguments used earlier for diffphot.main(). We only
            # need to change the first argument (path to the input LEMONdB)
            # and the third one (path to the output LEMONdB)
            diff_args[0] = aper_phot_db_path
            diff_args[2] = aper_diff_db_path
            check_run(diffphot.main, [str(a) for a in diff_args])

            miner = mining.LEMONdBMiner(aper_diff_db_path)

            try:
                kwargs = dict(minimum = options.min_images)
                cstars = miner.sort_by_curve_stdev(pfilter, **kwargs)
            except mining.NoStarsSelectedError:
                # There are no light curves with at least options.min_images points.
                # Therefore, much to our sorrow, we cannot evaluate this aperture.
                msg = "%sNo constant stars for this aperture. Ignoring it..."
                print msg % style.prefix
                continue

            # There must be at most 'nconstant' stars, but there may be fewer
            # if this aperture causes one or more of the constant stars to be
            # too faint (INDEF) in so many images that their light curves
            # cannot be computed.
            assert len(cstars) <= options.nconstant

            if len(cstars) < options.pminimum:
                msg = ("%sJust %d constant stars, fewer than the allowed "
                       "minimum of %d, had their light curves calculated "
                       "for this aperture. Ignoring it...")
                args = style.prefix, len(cstars), options.pminimum
                print msg % args
                continue

            # 'cstars' contains two-element tuples: (ID, stdev)
            stdevs_median = numpy.median([x[1] for x in cstars])
            params = (aperture, annulus, dannulus, stdevs_median)
            # NumPy floating-point data types are not JSON serializable
            args = (float(x) for x in params)
            candidate = json_parse.CandidateAnnuli(*args)
            evaluated_annuli[pfilter].append(candidate)

            msg = "%sAperture = %.3f, median stdev (%d stars) = %.4f"
            args = style.prefix, aperture, len(cstars), stdevs_median
            print msg % args

            percentage = (index + 1) * 100.0 / len(filter_apertures)
            msg = "%s%s progress: %.2f %%"
            args = style.prefix, pfilter, percentage
            print msg % args

        # Let the user know of the best 'annuli', that is, the combination
        # of photometric parameters for which the median standard deviation
        # of the constant stars' light curves is minimal.
        kwargs = dict(key = operator.attrgetter('stdev'))
        best_candidate = min(evaluated_annuli[pfilter], **kwargs)

        msg = "%sBest aperture found at %.3f pixels with stdev = %.4f"
        args = style.prefix, best_candidate.aperture, best_candidate.stdev
        print msg % args

    print style.prefix
    msg = "%sSaving the evaluated apertures to the '%s' JSON file ..."
    print msg % (style.prefix, output_json_path) ,
    json_parse.CandidateAnnuli.dump(evaluated_annuli, output_json_path)
    print ' done.'

    print "%sYou're done ^_^" % style.prefix
    return 0
Example #43
0
 def test_default(self):
     sf = SolverFactory()
     solve(sf, sf.Default, "examples/normal.png", tempfile.mkstemp())
def handle_file_post(req, allowed_mimetypes=None):
    """
    Handle the POST of a file.
    @return: a tuple with the full path to the file saved on disk,
    and its mimetype as provided by the request.
    @rtype: (string, string)
    """
    from invenio.bibdocfile import decompose_file, md5
    ## We retrieve the length
    clen = req.headers_in["Content-Length"]
    if clen is None:
        raise InvenioWebInterfaceWSGIContentLenghtError("Content-Length header is missing")
    try:
        clen = int(clen)
        assert (clen > 1)
    except (ValueError, AssertionError):
        raise InvenioWebInterfaceWSGIContentLenghtError("Content-Length header should contain a positive integer")
    ## Let's take the content type
    ctype = req.headers_in["Content-Type"]
    if allowed_mimetypes and ctype not in allowed_mimetypes:
        raise InvenioWebInterfaceWSGIContentTypeError("Content-Type not in allowed list of content types: %s" % allowed_mimetypes)
    ## Let's optionally accept a suggested filename
    suffix = prefix = ''
    g = RE_CDISPOSITION_FILENAME.search(req.headers_in.get("Content-Disposition", ""))
    if g:
        dummy, prefix, suffix = decompose_file(g.group("filename"))
    ## Let's optionally accept an MD5 hash (and use it later for comparison)
    cmd5 = req.headers_in["Content-MD5"]
    if cmd5:
        the_md5 = md5()

    ## Ok. We can initialize the file
    fd, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR)
    the_file = os.fdopen(fd, 'w')
    ## Let's read the file
    while True:
        chunk = req.read(max(10240, clen))
        if len(chunk) < clen:
            ## We expected to read at least clen (which is different than 0)
            ## but chunk was shorter! Gosh! Error! Panic!
            ## fdopen()'s close() also closes the underlying fd
            the_file.close()
            os.remove(path)
            raise InvenioWebInterfaceWSGIContentLenghtError("File shorter than what specified in Content-Length")
        if cmd5:
            ## MD5 was in the header let's compute it
            the_md5.update(chunk)
        ## And let's definitively write the content to disk :-)
        the_file.write(chunk)
        clen -= len(chunk)
        if clen == 0:
            ## That's it. Everything was read.
            break
    if cmd5 and the_md5.hexdigest().lower() != cmd5.strip().lower():
        ## Let's check the MD5
        ## fdopen()'s close() also closes the underlying fd
        the_file.close()
        os.remove(path)
        raise InvenioWebInterfaceWSGIContentMD5Error("MD5 checksum does not match")
    ## Let's clean everything up
    the_file.close()
    return (path, ctype)
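
For context, a client posting to the handler above only needs to send the headers the function inspects. A minimal sketch with a hypothetical host, URL and payload; httplib is Python 2's name for http.client, and only Content-Length and Content-Type are strictly required:

import hashlib
import httplib  # http.client on Python 3

def post_file(host, url, payload, mimetype):
    """POST 'payload' (a byte string) and return the server's response."""
    headers = {
        "Content-Type": mimetype,
        "Content-Length": str(len(payload)),
        # Optional: lets the server verify the upload against a checksum.
        "Content-MD5": hashlib.md5(payload).hexdigest(),
        # Optional: suggests a filename (and thus prefix/suffix) for the temp file.
        "Content-Disposition": 'attachment; filename="upload.pdf"',
    }
    conn = httplib.HTTPConnection(host)
    conn.request("POST", url, payload, headers)
    return conn.getresponse()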
Example #45
0
 def setUp(self):
     self.fd, self.path = tempfile.mkstemp()
Example #46
0
    def process_batch(self):
        """Save the HTML file."""
        ok_count = 0
        fail_count = 0
        old_count = 0
        remote_count = 0

        try:
            my_host = socket.gethostname().split(".")[0]
        except Exception:
            my_host = socket.gethostname()

        try:
            temp_file = tempfile.mkstemp()
            file_handle = os.fdopen(temp_file[0], "w")
            file_name = temp_file[1]
        except Exception:
            sys.stderr.write(
                "Couldn't create temporary file for HTML output\n")
            return

        output_ok = StringIO()
        output_fail = StringIO()

        keys = list(self.batch_data.keys())
        keys.sort()
        for entry in keys:
            if self.batch_data[entry]["age"] > 120:
                status = "OLD"
                old_count += 1
            elif self.batch_data[entry]["status"]:
                status = "OK"
                ok_count += 1
            else:
                status = "FAIL"
                fail_count += 1
            if self.batch_data[entry]["host"] != my_host:
                remote_count += 1
            try:
                monitor_name = entry.split("/")[1]
            except Exception:
                monitor_name = entry
            if status == "FAIL":
                output = output_fail
            else:
                output = output_ok
            output.write("<tr class=\"%srow\">" % status.lower())
            output.write("""
            <td class="monitor_name">%s</td>
            <td class="status %s">%s</td>
            <td>%s</td>
            <td>%s</td>
            """ % (
                monitor_name,
                status.lower(),
                status,
                self.batch_data[entry]["host"],
                self.batch_data[entry]["fail_time"],
            ))
            if self.batch_data[entry]["fail_count"] == 0:
                output.write("<td class=\"vfc\">&nbsp;</td>")
            else:
                output.write("<td class=\"vfc\">%s</td>" %
                             self.batch_data[entry]["fail_count"])
            try:
                output.write("<td>%d+%02d:%02d:%02d</td>" %
                             (self.batch_data[entry]["downtime"][0],
                              self.batch_data[entry]["downtime"][1],
                              self.batch_data[entry]["downtime"][2],
                              self.batch_data[entry]["downtime"][3]))
            except Exception:
                output.write("<td>&nbsp;</td>")
            output.write("<td>%s &nbsp;</td>" %
                         (self.batch_data[entry]["fail_data"]))
            if self.batch_data[entry]["failures"] == 0:
                output.write("<td></td><td></td>")
            else:
                output.write("""<td>%s</td>
                <td>%s</td>""" % (self.batch_data[entry]["failures"],
                                  self.format_datetime(
                                      self.batch_data[entry]["last_failure"])))
            if self.batch_data[entry]["host"] == my_host:
                output.write("<td></td>")
            else:
                output.write("<td>%d</td>" % self.batch_data[entry]["age"])
            output.write("</tr>\n")
        count_data = "<div id=\"summary\""
        if old_count > 0:
            cls = "old"
        elif fail_count > 0:
            cls = "fail"
        else:
            cls = "ok"

        count_data = count_data + " class=\"%s\">%s" % (cls, cls.upper())
        self.count_data = count_data + "<div id=\"details\"><span class=\"ok\">%d OK</span> <span class=\"fail\">%d FAIL</span> <span class=\"old\">%d OLD</span> <span class=\"remote\">%d remote</span></div></div>" % (
            ok_count, fail_count, old_count, remote_count)

        self.status = cls.upper()

        with open(os.path.join(self.folder, self.header), "r") as file_input:
            file_handle.writelines(self.parse_file(file_input))

        file_handle.write(output_fail.getvalue())
        file_handle.write(output_ok.getvalue())

        with open(os.path.join(self.folder, self.footer), "r") as file_input:
            file_handle.writelines(self.parse_file(file_input))

        try:
            file_handle.flush()
            file_handle.close()
            os.chmod(
                file_name,
                stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH)
            shutil.move(file_name, os.path.join(self.folder, self.filename))
        except Exception as e:
            print("problem closing temporary file for HTML output %s" % e)
Example #47
0
    def _run_wkhtmltopdf(self, cr, uid, headers, footers, bodies, landscape, paperformat, spec_paperformat_args=None, save_in_attachment=None, set_viewport_size=False):
        """Execute wkhtmltopdf as a subprocess in order to convert html given in input into a pdf
        document.

        :param headers: list of strings containing the headers
        :param footers: list of strings containing the footers
        :param bodies: list of strings containing the reports
        :param landscape: boolean to force the pdf to be rendered in landscape format
        :param paperformat: ir.actions.report.paperformat used to generate the wkhtmltopdf arguments
        :param spec_paperformat_args: dict of prioritized paperformat arguments
        :param save_in_attachment: dict of reports to save/load in/from the db
        :returns: Content of the pdf as a string
        """
        if not save_in_attachment:
            save_in_attachment = {}

        command_args = []
        if set_viewport_size:
            command_args.extend(['--viewport-size', landscape and '1024x1280' or '1280x1024'])

        # Passing the cookie to wkhtmltopdf in order to resolve internal links.
        try:
            if request:
                command_args.extend(['--cookie', 'session_id', request.session.sid])
        except AttributeError:
            pass

        # Wkhtmltopdf arguments
        command_args.extend(['--quiet'])  # Less verbose error messages
        if paperformat:
            # Convert the paperformat record into arguments
            command_args.extend(self._build_wkhtmltopdf_args(paperformat, spec_paperformat_args))

        # Force the landscape orientation if necessary
        if landscape and '--orientation' in command_args:
            command_args_copy = list(command_args)
            for index, elem in enumerate(command_args_copy):
                if elem == '--orientation':
                    del command_args[index]
                    del command_args[index]
                    command_args.extend(['--orientation', 'landscape'])
        elif landscape and '--orientation' not in command_args:
            command_args.extend(['--orientation', 'landscape'])

        # Execute WKhtmltopdf
        pdfdocuments = []
        temporary_files = []

        for index, reporthtml in enumerate(bodies):
            local_command_args = []
            pdfreport_fd, pdfreport_path = tempfile.mkstemp(suffix='.pdf', prefix='report.tmp.')
            temporary_files.append(pdfreport_path)

            # Directly load the document if we already have it
            if save_in_attachment and save_in_attachment['loaded_documents'].get(reporthtml[0]):
                with closing(os.fdopen(pdfreport_fd, 'w')) as pdfreport:
                    pdfreport.write(save_in_attachment['loaded_documents'][reporthtml[0]])
                pdfdocuments.append(pdfreport_path)
                continue
            else:
                os.close(pdfreport_fd)

            # Wkhtmltopdf handles header/footer as separate pages. Create them if necessary.
            if headers:
                head_file_fd, head_file_path = tempfile.mkstemp(suffix='.html', prefix='report.header.tmp.')
                temporary_files.append(head_file_path)
                with closing(os.fdopen(head_file_fd, 'w')) as head_file:
                    head_file.write(headers[index])
                local_command_args.extend(['--header-html', head_file_path])
            if footers:
                foot_file_fd, foot_file_path = tempfile.mkstemp(suffix='.html', prefix='report.footer.tmp.')
                temporary_files.append(foot_file_path)
                with closing(os.fdopen(foot_file_fd, 'w')) as foot_file:
                    foot_file.write(footers[index])
                local_command_args.extend(['--footer-html', foot_file_path])

            # Body stuff
            content_file_fd, content_file_path = tempfile.mkstemp(suffix='.html', prefix='report.body.tmp.')
            temporary_files.append(content_file_path)
            with closing(os.fdopen(content_file_fd, 'w')) as content_file:
                content_file.write(reporthtml[1])

            try:
                wkhtmltopdf = [_get_wkhtmltopdf_bin()] + command_args + local_command_args
                wkhtmltopdf += [content_file_path] + [pdfreport_path]
                process = subprocess.Popen(wkhtmltopdf, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = process.communicate()

                if process.returncode not in [0, 1]:
                    raise UserError(_('Wkhtmltopdf failed (error code: %s). '
                                      'Message: %s') % (str(process.returncode), err))

                # Save the pdf in attachment if marked
                if reporthtml[0] is not False and save_in_attachment.get(reporthtml[0]):
                    with open(pdfreport_path, 'rb') as pdfreport:
                        attachment = {
                            'name': save_in_attachment.get(reporthtml[0]),
                            'datas': base64.encodestring(pdfreport.read()),
                            'datas_fname': save_in_attachment.get(reporthtml[0]),
                            'res_model': save_in_attachment.get('model'),
                            'res_id': reporthtml[0],
                        }
                        try:
                            self.pool['ir.attachment'].create(cr, uid, attachment)
                        except AccessError:
                            _logger.info("Cannot save PDF report %r as attachment", attachment['name'])
                        else:
                            _logger.info('The PDF document %s is now saved in the database',
                                         attachment['name'])

                pdfdocuments.append(pdfreport_path)
            except:
                raise

        # Return the entire document
        if len(pdfdocuments) == 1:
            entire_report_path = pdfdocuments[0]
        else:
            entire_report_path = self._merge_pdf(pdfdocuments)
            temporary_files.append(entire_report_path)

        with open(entire_report_path, 'rb') as pdfdocument:
            content = pdfdocument.read()

        # Manual cleanup of the temporary files
        for temporary_file in temporary_files:
            try:
                os.unlink(temporary_file)
            except (OSError, IOError):
                _logger.error('Error when trying to remove file %s' % temporary_file)

        return content
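
Stripped of the Odoo plumbing, the core of the method above is a temp-file round-trip through the wkhtmltopdf binary. A minimal sketch, assuming wkhtmltopdf is on the PATH (as above, exit code 1 only signals non-fatal warnings):

import os
import subprocess
import tempfile

def html_to_pdf(html):
    body_fd, body_path = tempfile.mkstemp(suffix=".html")
    pdf_fd, pdf_path = tempfile.mkstemp(suffix=".pdf")
    os.close(pdf_fd)  # wkhtmltopdf writes the file itself
    with os.fdopen(body_fd, "w") as body_file:
        body_file.write(html)
    try:
        process = subprocess.Popen(
            ["wkhtmltopdf", "--quiet", body_path, pdf_path],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, err = process.communicate()
        if process.returncode not in (0, 1):
            raise RuntimeError(err)
        with open(pdf_path, "rb") as pdf_file:
            return pdf_file.read()
    finally:
        for path in (body_path, pdf_path):
            try:
                os.unlink(path)
            except OSError:
                pass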
Example #48
0
    def _download_updates(self):
        updates_filename = 'jenkins-plugin-cache.json'
        updates_dir = os.path.expanduser('~/.ansible/tmp')
        updates_file = "%s/%s" % (updates_dir, updates_filename)
        download_updates = True

        # Check if we need to download new updates file
        if os.path.isfile(updates_file):
            # Get the timestamp of the file's last modification
            ts_file = os.stat(updates_file).st_mtime
            ts_now = time.time()

            if ts_now - ts_file < self.params['updates_expiration']:
                download_updates = False

        updates_file_orig = updates_file

        # Download the updates file if needed
        if download_updates:
            url = "%s/update-center.json" % self.params['updates_url']

            # Get the data
            r = self._get_url_data(
                url,
                msg_status="Remote updates not found.",
                msg_exception="Updates download failed.")

            # Write the updates file
            update_fd, updates_file = tempfile.mkstemp()
            os.write(update_fd, r.read())

            try:
                os.close(update_fd)
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot close the tmp updates file %s." % updates_file,
                    details=to_native(e))

        # Open the updates file
        try:
            f = open(updates_file)
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot open temporal updates file.",
                details=to_native(e))

        i = 0
        for line in f:
            # Read only the second line
            if i == 1:
                try:
                    data = json.loads(line)
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot load JSON data from the tmp updates file.",
                        details=e.message)

                break

            i += 1

        # Move the updates file to the right place if we could read it
        if download_updates:
            # Make sure the destination directory exists
            if not os.path.isdir(updates_dir):
                try:
                    os.makedirs(updates_dir, int('0700', 8))
                except OSError:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot create temporal directory.",
                        details=e.message)

            self.module.atomic_move(updates_file, updates_file_orig)

        # Check if we have the plugin data available
        if 'plugins' not in data or self.params['name'] not in data['plugins']:
            self.module.fail_json(
                msg="Cannot find plugin data in the updates file.")

        return data['plugins'][self.params['name']]
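
The caching logic above boils down to comparing the file's mtime against an expiration window before re-downloading. The test in isolation (the default age is illustrative):

import os
import time

def cache_is_fresh(path, max_age_seconds=86400):
    """Return True if 'path' exists and is younger than max_age_seconds."""
    try:
        return (time.time() - os.stat(path).st_mtime) < max_age_seconds
    except OSError:
        return False  # a missing file counts as stale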
Example #49
0
    def format(self, edit, selection=None):
        """
        Format selection (if None then formats the entire document).

        Returns region containing the reformatted text.
        """
        # determine selection to format
        if not selection:
            selection = sublime.Region(0, self.view.size())
        self.debug('Formatting selection %r', selection)

        # retrieve selected text & dedent
        text = self.view.substr(selection)
        text, indent, trailing_nl = dedent_text(text)
        self.debug('Detected indent %r', indent)

        # encode text
        try:
            encoded_text = text.encode(self.encoding)
        except UnicodeEncodeError as err:
            msg = (
                "You may need to re-open this file with a different encoding."
                " Current encoding is %r." % self.encoding)
            self.error("UnicodeEncodeError: %s\n\n%s", err, msg)
            return

        # pass source code to be formatted on stdin?
        if self.get_setting("use_stdin"):
            # run yapf
            self.debug('Running %s in %s', self.popen_args, self.popen_cwd)
            try:
                popen = subprocess.Popen(self.popen_args,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE,
                                         stdin=subprocess.PIPE,
                                         cwd=self.popen_cwd,
                                         env=self.popen_env,
                                         startupinfo=self.popen_startupinfo)
            except OSError as err:
                # always show error in popup
                msg = "You may need to install YAPF and/or configure 'yapf_command' in PyYapf's Settings."
                sublime.error_message("OSError: %s\n\n%s" % (err, msg))
                return
            encoded_stdout, encoded_stderr = popen.communicate(encoded_text)
            text = encoded_stdout.decode(self.encoding)
        else:
            # do _not_ use stdin. this avoids a unicode defect in yapf, see
            # https://github.com/google/yapf/pull/145. the downside is that
            # .style.yapf / setup.cfg configuration is not picked up properly,
            # see https://github.com/jason-kane/PyYapf/issues/36.
            # we may permanently use stdin and remove the use_stdin option and
            # this code once the upstream bug is fixed.
            file_obj, temp_filename = tempfile.mkstemp(suffix=".py")
            try:
                temp_handle = os.fdopen(file_obj, 'wb' if SUBLIME_3 else 'w')
                temp_handle.write(encoded_text)
                temp_handle.close()
                self.popen_args += ["--in-place", temp_filename]

                self.debug('Running %s in %s', self.popen_args, self.popen_cwd)
                try:
                    popen = subprocess.Popen(
                        self.popen_args,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        cwd=self.popen_cwd,
                        env=self.popen_env,
                        startupinfo=self.popen_startupinfo)
                except OSError as err:
                    # always show error in popup
                    msg = "You may need to install YAPF and/or configure 'yapf_command' in PyYapf's Settings."
                    sublime.error_message("OSError: %s\n\n%s" % (err, msg))
                    return

                encoded_stdout, encoded_stderr = popen.communicate()

                if SUBLIME_3:
                    open_encoded = open
                else:
                    import codecs
                    open_encoded = codecs.open

                with open_encoded(temp_filename, encoding=self.encoding) as fp:
                    text = fp.read()
            finally:
                os.unlink(temp_filename)

        self.debug('Exit code %d', popen.returncode)

        # handle errors (since yapf>=0.3, exit code 2 means changed, not error)
        if popen.returncode not in (0, 2):
            stderr = encoded_stderr.decode(self.encoding)
            stderr = stderr.replace(os.linesep, '\n')
            self.debug('Error:\n%s', stderr)

            # report error
            err_lines = stderr.splitlines()
            msg = err_lines[-1]
            self.error('%s', msg)

            # attempt to highlight line where error occurred
            rel_line = parse_error_line(err_lines)
            if rel_line:
                line = self.view.rowcol(selection.begin())[0]
                pt = self.view.text_point(line + rel_line - 1, 0)
                region = self.view.line(pt)
                self.view.add_regions(KEY, [region], KEY, 'cross', ERROR_FLAGS)
            return

        # adjust newlines (only necessary when use_stdin is True, since
        # [codecs.]open uses universal newlines by default)
        text = text.replace(os.linesep, '\n')

        # re-indent and replace text
        text = indent_text(text, indent, trailing_nl)
        self.view.replace(edit, selection, text)

        # return region containing modified text
        if selection.a <= selection.b:
            return sublime.Region(selection.a, selection.a + len(text))
        else:
            return sublime.Region(selection.b + len(text), selection.b)
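
dedent_text() and indent_text() are defined elsewhere in the plugin; a rough, hypothetical sketch of that round-trip (simplified to two return values rather than the three the plugin tracks) could look like:

import textwrap

def dedent_text(text):
    """Strip the common leading indent and remember it."""
    dedented = textwrap.dedent(text)
    indent = ''
    for orig, new in zip(text.splitlines(), dedented.splitlines()):
        if new.strip():
            # the indent is whatever the first non-blank line lost
            indent = orig[:len(orig) - len(new)]
            break
    return dedented, indent

def indent_text(text, indent):
    """Re-apply the remembered indent to every non-blank line."""
    return '\n'.join(indent + line if line.strip() else line
                     for line in text.split('\n'))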
Example #50
0
def backup_autosave_file(path, content):
    handle, temp_path = tempfile.mkstemp()
    os.close(handle)  # mkstemp returns an open fd; close it before reopening
    with open(temp_path, 'w') as f:
        f.write(content)
    print('A backup of %s has been saved here: %s' % (path, temp_path))
Example #51
0
 def setUp(self):
     self.var_name = 'TRMM_3B42_daily_precipitation_V7'
     _, self.mfst_file_name = tempfile.mkstemp()
Example #52
0
def mktemp(dir=None):
    """Create a temp file, known by name, in a semi-secure manner."""
    handle, filename = mkstemp(dir=dir)
    os.close(handle)
    return filename
Example #53
0
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errorno, e.strerror))
        sys.exit(1)

    def atexit_cb():
        try:
            os.remove(pidfile)
        except OSError:
            pass

    atexit.register(atexit_cb)

    try:
        if not os.path.exists(os.path.dirname(pidfile)):
            os.mkdir(os.path.dirname(pidfile))

        fd, nm = tempfile.mkstemp(dir=os.path.dirname(pidfile))
        os.write(fd, '%d\n' % os.getpid())
        os.close(fd)
        os.rename(nm, pidfile)
    except:
        raise

    for f in sys.stdout, sys.stderr:
        f.flush()

    si = file(stdin, 'r')
    so = file(stdout, 'a+')
    se = file(stderr, 'a+', 0)
    if hasattr(sys.stdin, "fileno"):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, "fileno"):
Example #54
0
    def configure(ctx, self, target=None, arch=None, plat=None, options=[],
                  force_configure=False):
        """
        Configure a Unikraft application.
        """

        if not self.is_configured():
            self.init()

        if target is not None and isinstance(target, Target):
            arch = target.architecture
            plat = target.platform

        archs = list()
        plats = list()

        def match_arch(arch, target):
            if isinstance(arch, six.string_types) and \
                    arch == target.architecture.name:
                return target.architecture
            if isinstance(arch, Architecture) and \
                    arch.name == target.architecture.name:
                return arch
            return None

        def match_plat(plat, target):
            if isinstance(plat, six.string_types) and \
                    plat == target.platform.name:
                return target.platform
            if isinstance(plat, Platform) and \
                    plat.name == target.platform.name:
                return plat
            return None

        if len(self.config.targets.all()) == 1 \
                and target is None and arch is None and plat is None:
            target = self.config.targets.all()[0]
            archs.append(target.architecture)
            plats.append(target.platform)

        else:
            for t in self.config.targets.all():
                if match_arch(arch, t) is not None \
                        and match_plat(plat, t) is not None:
                    archs.append(t.architecture)
                    plats.append(t.platform)

        # Generate a dynamic .config, based on configure's parameters,
        # with which to populate the defconfig.
        dotconfig = list()
        dotconfig.extend(self.config.unikraft.kconfig or [])

        for arch in archs:
            if not arch.is_downloaded:
                raise MissingComponent(arch.name)

            dotconfig.extend(arch.kconfig)
            dotconfig.append(arch.kconfig_enabled_flag)

        for plat in plats:
            if not plat.is_downloaded:
                raise MissingComponent(plat.name)

            dotconfig.extend(plat.kconfig)
            dotconfig.append(plat.kconfig_enabled_flag)

        for lib in self.config.libraries.all():
            if not lib.is_downloaded:
                raise MissingComponent(lib.name)

            dotconfig.extend(lib.kconfig)
            dotconfig.append(lib.kconfig_enabled_flag)

        # Add any additional configuration options, overriding existing
        # configuration options.
        for new_opt in options:
            o = new_opt.split('=')
            for exist_opt in dotconfig:
                e = exist_opt.split('=')
                if o[0] == e[0]:
                    dotconfig.remove(exist_opt)
                    break
            dotconfig.append(new_opt)

        # Create a temporary file with the kconfig written to it
        fd, path = tempfile.mkstemp()

        with os.fdopen(fd, 'w+') as tmp:
            logger.debug('Using the following defconfig:')
            for line in dotconfig:
                logger.debug(' > ' + line)
                tmp.write(line + '\n')

        return_code = 0

        try:
            return_code = self.make([
                ('UK_DEFCONFIG=%s' % path),
                'defconfig'
            ])
        finally:
            os.remove(path)

        return return_code
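
The options loop above implements a last-writer-wins merge on KEY=VALUE pairs. The same rule as a standalone helper (the name is illustrative):

def merge_kconfig(base, overrides):
    """Later KEY=VALUE entries replace earlier entries with the same KEY."""
    merged = list(base)
    for new_opt in overrides:
        key = new_opt.split('=')[0]
        merged = [opt for opt in merged if opt.split('=')[0] != key]
        merged.append(new_opt)
    return merged

# merge_kconfig(['CONFIG_A=y', 'CONFIG_B=n'], ['CONFIG_B=y'])
# -> ['CONFIG_A=y', 'CONFIG_B=y']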
Example #55
0
    def _handler(self, request, response):
        init_process_logger('log.txt')
        response.outputs['output_log'].file = 'log.txt'

        response.update_status('Start process', 0)

        try:
            LOGGER.info('reading the arguments')
            resources = archiveextract(
                resource=rename_complexinputs(request.inputs['resource']))
            taxon_name = request.inputs['taxon_name'][0].data
            bbox = [-180, -90, 180, 90]
            # bbox_obj = self.BBox.getValue()
            # bbox = [bbox_obj.coords[0][0],
            #         bbox_obj.coords[0][1],
            #         bbox_obj.coords[1][0],
            #         bbox_obj.coords[1][1]]
            period = request.inputs['period']
            period = period[0].data
            indices = [inpt.data for inpt in request.inputs['indices']]
            archive_format = request.inputs['archive_format'][0].data
            LOGGER.exception("indices = {} for {}".format(indices, taxon_name))
            LOGGER.info("bbox={}".format(bbox))
        except Exception as ex:
            msg = 'failed to read in the arguments: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        LOGGER.info('indices {}'.format(indices))

        try:
            response.update_status('Fetching GBIF Data', 10)
            gbifdic = sdm.get_gbif(taxon_name, bbox=bbox)
            LOGGER.info('Fetched GBIF data')
        except Exception as ex:
            msg = 'failed to search gbif: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            response.update_status('write csv file', 70)
            gbifcsv = sdm.gbifdic2csv(gbifdic)
            LOGGER.info('GBIF data written to file')
        except Exception as ex:
            msg = 'failed to write csv file: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            response.update_status('plot map', 80)
            latlon = sdm.latlon_gbifdic(gbifdic)
            occurence_map = map_gbifoccurrences(latlon)
        except Exception as ex:
            msg = 'failed to plot occurence map: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        #################################
        # calculate the climate indices
        #################################

        # get the indices
        ncs_indices = None
        try:
            response.update_status('start calculation of climate indices for {}'.format(indices), 30)

            ncs_indices = sdm.get_indices(resource=resources, indices=indices)
            LOGGER.info('indice calculation done')
        except Exception as ex:
            msg = 'failed to calculate indices: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            # sort indices
            indices_dic = sdm.sort_indices(ncs_indices)
            LOGGER.info('indice files sorted in dictionary')
        except Exception as ex:
            msg = 'failed to sort indices: {}'.format(str(ex))
            LOGGER.exception(msg)
            indices_dic = {'dummy': []}

        ncs_references = []
        species_files = []
        stat_infos = []
        PAmask_pngs = []

        response.update_status('Start processing for {} Datasets'.format(len(indices_dic.keys())))

        for count, key in enumerate(indices_dic.keys()):
            try:
                status_nr = 40 + count * 10
                response.update_status('Start processing of {}'.format(key), status_nr)

                ncs = indices_dic[key]
                LOGGER.info('with {} files'.format(len(ncs)))

                try:
                    response.update_status('generating the PA mask', 20)
                    PAmask = sdm.get_PAmask(coordinates=latlon, nc=ncs[0])
                    LOGGER.info('PA mask successfully generated')
                except Exception as ex:
                    msg = 'failed to generate the PA mask: {}'.format(str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

                try:
                    response.update_status('Plotting PA mask', 25)
                    PAmask_pngs.extend([map_PAmask(PAmask)])
                except Exception as ex:
                    msg = 'failed to plot the PA mask: {}'.format(str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

                try:
                    ncs_reference = sdm.get_reference(ncs_indices=ncs, period=period)
                    ncs_references.extend(ncs_reference)
                    LOGGER.info('reference indice calculated %s '
                                % ncs_references)
                except Exception as ex:
                    msg = 'failed to calculate the reference: {}'.format(str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

                try:
                    gam_model, predict_gam, gam_info = sdm.get_gam(ncs_reference, PAmask)
                    stat_infos.append(gam_info)
                    response.update_status('GAM successfully trained', status_nr + 5)
                except Exception as ex:
                    msg = 'failed to train GAM for {}: {}'.format(key, str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

                try:
                    prediction = sdm.get_prediction(gam_model, ncs)
                    response.update_status('prediction done', status_nr + 7)
                except Exception as ex:
                    msg = 'failed to predict tree occurrence: {}'.format(str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

                # try:
                #     response.update_status('land sea mask for predicted data',  status_nr + 8)
                #     from numpy import invert, isnan, nan, broadcast_arrays  # , array, zeros, linspace, meshgrid
                #     mask = invert(isnan(PAmask))
                #     mask = broadcast_arrays(prediction, mask)[1]
                #     prediction[mask is False] = nan
                # except:
                #     LOGGER.exception('failed to mask predicted data')

                try:
                    species_files.append(sdm.write_to_file(ncs[0], prediction))
                    LOGGER.info('Favourability written to file')
                except Exception as ex:
                    msg = 'failed to write species file: {}'.format(str(ex))
                    LOGGER.exception(msg)
                    raise Exception(msg)

            except Exception as ex:
                msg = 'failed to calculate reference indices: {}'.format(str(ex))
                LOGGER.exception(msg)
                raise Exception(msg)

        try:
            archive_indices = archive(ncs_indices, format=archive_format)
            LOGGER.info('indices added to archive')
        except Exception as ex:
            msg = 'failed adding indices to archive: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            archive_references = archive(ncs_references, format=archive_format)
            LOGGER.info('indices reference added to archive')
        except Exception as ex:
            msg = 'failed adding reference indices to archive: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            archive_prediction = archive(species_files, format=archive_format)
            LOGGER.info('species_files added to archive')
        except Exception as ex:
            msg = 'failed adding species_files indices to archive: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        try:
            stat_infosconcat = pdfmerge(stat_infos)
            LOGGER.debug('pngs {}'.format(PAmask_pngs))
            PAmask_png = concat_images(PAmask_pngs, orientation='h')
            LOGGER.info('stat info pdfs and mask pngs merged')
        except Exception as ex:
            msg = 'failed to concat images: {}'.format(str(ex))
            LOGGER.exception(msg)
            raise Exception(msg)

        response.outputs['output_gbif'].file = occurence_map
        response.outputs['output_PA'].file = PAmask_png
        response.outputs['output_indices'].file = archive_indices
        response.outputs['output_reference'].file = archive_references
        response.outputs['output_prediction'].file = archive_prediction
        response.outputs['output_info'].file = stat_infosconcat
        response.outputs['output_csv'].file = gbifcsv

        response.update_status('done', 100)
        return response
Example #56
0
 def setUp(self):
     _, self.mfst_file_name = tempfile.mkstemp()
Example #57
0
 def temp_file(extension='.tmp'):
     (fd, tmp_file) = tempfile.mkstemp(extension)
     os.close(fd)  # callers only need the path; don't leak the descriptor
     return tmp_file
Example #58
0
def run_code(code_to_run, path, module=None, cls=None, shows_plot=False, imports_not_required=False):
    """
    Run the given code chunk and collect the output.
    """

    skipped = False
    failed = False

    if cls is None:
        use_mpi = False
    else:
        try:
            import mpi4py
        except ImportError:
            use_mpi = False
        else:
            N_PROCS = getattr(cls, 'N_PROCS', 1)
            use_mpi = N_PROCS > 1

    try:
        # use subprocess to run code to avoid any nasty interactions between codes

        # Move to the test directory in case there are files to read.
        save_dir = os.getcwd()

        if module is None:
            code_dir = os.path.dirname(os.path.abspath(path))
        else:
            code_dir = os.path.dirname(os.path.abspath(module.__file__))

        os.chdir(code_dir)

        if use_mpi:
            env = os.environ.copy()

            # output will be written to one file per process
            env['USE_PROC_FILES'] = '1'

            env['OPENMDAO_CURRENT_MODULE'] = module.__name__
            env['OPENMDAO_CODE_TO_RUN'] = code_to_run

            p = subprocess.Popen(['mpirun', '-n', str(N_PROCS), sys.executable, _sub_runner],
                                 env=env)
            p.wait()

            # extract output blocks from all output files & merge them
            output = []
            for i in range(N_PROCS):
                with open('%d.out' % i) as f:
                    output.append(f.read())
                os.remove('%d.out' % i)

        elif shows_plot:
            if module is None:
                # write code to a file so we can run it.
                fd, code_to_run_path = tempfile.mkstemp()
                with os.fdopen(fd, 'w') as tmp:
                    tmp.write(code_to_run)
                try:
                    p = subprocess.Popen([sys.executable, code_to_run_path],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
                    output, _ = p.communicate()
                    if p.returncode != 0:
                        failed = True

                finally:
                    os.remove(code_to_run_path)
            else:
                env = os.environ.copy()

                env['OPENMDAO_CURRENT_MODULE'] = module.__name__
                env['OPENMDAO_CODE_TO_RUN'] = code_to_run

                p = subprocess.Popen([sys.executable, _sub_runner],
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
                output, _ = p.communicate()
                if p.returncode != 0:
                    failed = True

            output = output.decode('utf-8', 'ignore')
        else:
            # just exec() the code for serial tests.

            # capture all output
            stdout = sys.stdout
            stderr = sys.stderr
            strout = StringIO()
            sys.stdout = strout
            sys.stderr = strout

            # We need more precision from numpy
            with printoptions(precision=8):

                if module is None:
                    globals_dict = {
                        '__file__': path,
                        '__name__': '__main__',
                        '__package__': None,
                        '__cached__': None,
                    }
                else:
                    if imports_not_required:
                        # code does not need to include all imports
                        # Get from module
                        globals_dict = module.__dict__
                    else:
                        globals_dict = {}

                try:
                    exec(code_to_run, globals_dict)
                except Exception as err:
                    # for actual errors, print code (with line numbers) to facilitate debugging
                    if not isinstance(err, unittest.SkipTest):
                        for n, line in enumerate(code_to_run.split('\n')):
                            print('%4d: %s' % (n, line), file=stderr)
                    raise
                finally:
                    sys.stdout = stdout
                    sys.stderr = stderr

            output = strout.getvalue()

    except subprocess.CalledProcessError as e:
        output = e.output.decode('utf-8', 'ignore')
        # Get a traceback.
        if 'raise unittest.SkipTest' in output:
            reason_for_skip = output.splitlines()[-1][len('unittest.case.SkipTest: '):]
            output = reason_for_skip
            skipped = True
        else:
            output = "Running of embedded code {} in docs failed due to: \n\n{}".format(path, output)
            failed = True
    except unittest.SkipTest as skip:
        output = str(skip)
        skipped = True
    except Exception as exc:
        output = "Running of embedded code {} in docs failed due to: \n\n{}".format(path, traceback.format_exc())
        failed = True
    finally:
        os.chdir(save_dir)

    return skipped, failed, output
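
The serial branch above captures output by swapping sys.stdout for an in-memory buffer around exec(). The idiom in isolation:

import sys
from io import StringIO

def capture_output(code):
    old_stdout, buffer = sys.stdout, StringIO()
    sys.stdout = buffer
    try:
        exec(code, {'__name__': '__main__'})
    finally:
        sys.stdout = old_stdout  # always restore, even if the code raises
    return buffer.getvalue()

# capture_output("print('hello')") == 'hello\n'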
Example #59
0
 def setUp(self):
     self.uvc = UpdateVerifyConfig()
     fd, self.tmpfilename = mkstemp()
     self.tmpfile = os.fdopen(fd, "wb")
Example #60
0
def main():
    parser = argparse.ArgumentParser(
        description='Run clang-tidy against changed files, and '
        'output diagnostics only for modified '
        'lines.')
    parser.add_argument('-clang-tidy-binary',
                        metavar='PATH',
                        default='clang-tidy',
                        help='path to clang-tidy binary')
    parser.add_argument('-p',
                        metavar='NUM',
                        default=0,
                        help='strip the smallest prefix containing P slashes')
    parser.add_argument('-regex',
                        metavar='PATTERN',
                        default=None,
                        help='custom pattern selecting file paths to check '
                        '(case sensitive, overrides -iregex)')
    parser.add_argument('-iregex',
                        metavar='PATTERN',
                        default=r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
                        help='custom pattern selecting file paths to check '
                        '(case insensitive, overridden by -regex)')
    parser.add_argument('-j',
                        type=int,
                        default=1,
                        help='number of tidy instances to be run in parallel.')
    parser.add_argument('-timeout',
                        type=int,
                        default=None,
                        help='timeout per each file in seconds.')
    parser.add_argument('-fix',
                        action='store_true',
                        default=False,
                        help='apply suggested fixes')
    parser.add_argument(
        '-checks',
        help='checks filter, when not specified, use clang-tidy '
        'default',
        default='')
    parser.add_argument('-path',
                        dest='build_path',
                        help='Path used to read a compile command database.')
    if yaml:
        parser.add_argument(
            '-export-fixes',
            metavar='FILE',
            dest='export_fixes',
            help='Create a yaml file to store suggested fixes in, '
            'which can be applied with clang-apply-replacements.')
    parser.add_argument('-extra-arg',
                        dest='extra_arg',
                        action='append',
                        default=[],
                        help='Additional argument to append to the compiler '
                        'command line.')
    parser.add_argument('-extra-arg-before',
                        dest='extra_arg_before',
                        action='append',
                        default=[],
                        help='Additional argument to prepend to the compiler '
                        'command line.')
    parser.add_argument('-quiet',
                        action='store_true',
                        default=False,
                        help='Run clang-tidy in quiet mode')
    clang_tidy_args = []
    argv = sys.argv[1:]
    if '--' in argv:
        clang_tidy_args.extend(argv[argv.index('--'):])
        argv = argv[:argv.index('--')]

    args = parser.parse_args(argv)

    # Extract changed lines for each file.
    filename = None
    lines_by_file = {}
    for line in sys.stdin:
        match = re.search(r'^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
        if match:
            filename = match.group(2)
        if filename is None:
            continue

        if args.regex is not None:
            if not re.match('^%s$' % args.regex, filename):
                continue
        else:
            if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
                continue

        match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
        if match:
            start_line = int(match.group(1))
            line_count = 1
            if match.group(3):
                line_count = int(match.group(3))
            if line_count == 0:
                continue
            end_line = start_line + line_count - 1
            lines_by_file.setdefault(filename,
                                     []).append([start_line, end_line])

    if not any(lines_by_file):
        print("No relevant changes found.")
        sys.exit(0)

    max_task_count = args.j
    if max_task_count == 0:
        max_task_count = multiprocessing.cpu_count()
    max_task_count = min(len(lines_by_file), max_task_count)

    tmpdir = None
    if yaml and args.export_fixes:
        tmpdir = tempfile.mkdtemp()

    # Tasks for clang-tidy.
    task_queue = queue.Queue(max_task_count)
    # A lock for console output.
    lock = threading.Lock()

    # Run a pool of clang-tidy workers.
    start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)

    # Form the common args list.
    common_clang_tidy_args = []
    if args.fix:
        common_clang_tidy_args.append('-fix')
    if args.checks != '':
        common_clang_tidy_args.append('-checks=' + args.checks)
    if args.quiet:
        common_clang_tidy_args.append('-quiet')
    if args.build_path is not None:
        common_clang_tidy_args.append('-p=%s' % args.build_path)
    for arg in args.extra_arg:
        common_clang_tidy_args.append('-extra-arg=%s' % arg)
    for arg in args.extra_arg_before:
        common_clang_tidy_args.append('-extra-arg-before=%s' % arg)

    for name in lines_by_file:
        line_filter_json = json.dumps([{
            "name": name,
            "lines": lines_by_file[name]
        }],
                                      separators=(',', ':'))

        # Run clang-tidy on files containing changes.
        command = [args.clang_tidy_binary]
        command.append('-line-filter=' + line_filter_json)
        if yaml and args.export_fixes:
            # Get a temporary file. We immediately close the handle so clang-tidy can
            # overwrite it.
            (handle, tmp_name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
            os.close(handle)
            command.append('-export-fixes=' + tmp_name)
        command.extend(common_clang_tidy_args)
        command.append(name)
        command.extend(clang_tidy_args)

        task_queue.put(command)

    # Wait for all threads to be done.
    task_queue.join()

    if yaml and args.export_fixes:
        print('Writing fixes to ' + args.export_fixes + ' ...')
        try:
            merge_replacement_files(tmpdir, args.export_fixes)
        except:
            sys.stderr.write('Error exporting fixes.\n')
            traceback.print_exc()

    if tmpdir:
        shutil.rmtree(tmpdir)
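
The changed-line ranges come from the unified-diff hunk headers: '@@ -10,3 +42,5 @@' marks five lines starting at line 42 of the new file. The parsing step in isolation (the helper name is illustrative):

import re

def changed_range(hunk_header):
    """Return (first, last) changed line of the new file, or None."""
    match = re.search(r'^@@.*\+(\d+)(,(\d+))?', hunk_header)
    if not match:
        return None
    start = int(match.group(1))
    count = int(match.group(3)) if match.group(3) else 1
    return (start, start + count - 1) if count else None

# changed_range('@@ -10,3 +42,5 @@') -> (42, 46)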