Code Example #1
File: conftest.py Project: cjlee112/socraticqs2
def temp_image():
    media_dir = settings.MEDIA_ROOT
    if not os.path.exists(media_dir):
        os.makedirs(media_dir)
    temp = TemporaryFile(suffix='.jpeg', dir=media_dir)
    temp.write(base64_gif_image())
    yield File(temp)
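A hypothetical consumer of the generator above, assuming it is registered as a pytest fixture in conftest.py (the test name is illustrative):

def test_temp_image(temp_image):
    # pytest injects the yielded django File wrapper
    temp_image.seek(0)
    assert temp_image.read()  # the GIF payload written by base64_gif_image()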
Code Example #2
File: test_utils.py Project: AsylumCorp/swift
 def __init__(self):
     self.stdin = TemporaryFile('w')
     self.stdout = TemporaryFile('r')
     self.stderr = TemporaryFile('r')
     self.__stderr__ = self.stderr
     self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
                       self.stderr.fileno()]
Code Example #3
 def draw(self):
     """Erstellt Chart als GIF. Gibt das GIF als string zurück."""
     from tempfile import TemporaryFile
     f = TemporaryFile()
     self.chart.draw(f)
     f.seek(0)
     return f.read()
Code Example #4
File: judge.py Project: LiQuidFly/OnlineJudge
    def pyc(self):
        ofile = TemporaryFile('w+t')
        if self.ua:
            dst = ANSWER_PATH + self.id + '.pyc'
        else:
            dst = BINARY_PATH + self.id + '.pyc'
        cmd = ['python', dst]
        p = Popen(cmd, stdin=self.ifile, stdout=ofile, universal_newlines=True,
                  preexec_fn=Tester.Limiter(self.lcpu, self.lmem), stderr=DEVNULL)
        p.wait()

        # A negative return code is the number of the signal that killed
        # the child process.
        self.result = 0
        if p.returncode == -9:        # SIGKILL (resource limit enforced by Limiter)
            self.result = -5
        elif p.returncode == -11:     # SIGSEGV (segmentation fault)
            self.result = -6
        elif p.returncode == -25:     # SIGXFSZ (output file size limit exceeded)
            self.result = -4
        elif p.returncode < 0:        # any other signal
            self.result = -3
        else:
            # Normal exit: compare the captured output with the expected answer.
            ofile.seek(0)
            if self.output != ofile.read():
                self.result = -7
Code Example #5
File: test_rio.py Project: Distrotech/bzr
    def test_read_several(self):
        """Read several stanzas from file"""
        tmpf = TemporaryFile()
        tmpf.write("""\
version_header: 1

name: foo
val: 123

name: quoted
address:   "Willowglen"
\t  42 Wallaby Way
\t  Sydney

name: bar
val: 129319
""")
        tmpf.seek(0)
        s = read_stanza(tmpf)
        self.assertEquals(s, Stanza(version_header='1'))
        s = read_stanza(tmpf)
        self.assertEquals(s, Stanza(name="foo", val='123'))
        s = read_stanza(tmpf)
        self.assertEqualDiff(s.get('name'), 'quoted')
        self.assertEqualDiff(s.get('address'), '  "Willowglen"\n  42 Wallaby Way\n  Sydney')
        s = read_stanza(tmpf)
        self.assertEquals(s, Stanza(name="bar", val='129319'))
        s = read_stanza(tmpf)
        self.assertEquals(s, None)
        self.check_rio_file(tmpf)
Code Example #6
def _getPID():
    """Get PID from specified PIDFile.
    
    Returns:
        int: >0 -- RESTservice PID
             -1 -- _pidfile contains invalid PID
             -2 -- _pidfile not found
    """
    pid = 0

    try:
        f = open(_pidfile, 'r')
        pid = int(f.read())
        f.close()
    except IOError as e:
        if e.errno == 2:  # ENOENT: the PID file does not exist
            return -2
        raise e
    except ValueError:
        return -1

    # Double check PID from PIDFile:
    outfile = TemporaryFile(mode='w+')
    call(['ps', 'x'], stdout=outfile)
    outfile.seek(0)
    for line in outfile:
        line = line.strip()
        if line.startswith(str(pid)) and line.endswith(_script_name):
            return pid

    return -1
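A brief usage sketch for the helper above; _pidfile and _script_name are module-level globals the function assumes:

pid = _getPID()
if pid == -2:
    print('PID file not found')
elif pid == -1:
    print('PID file holds an invalid or stale PID')
else:
    print('REST service running with PID %d' % pid)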
Code Example #7
class PackageZipBuilder(object):

    def __init__(self, namespace, version=None):
        self.namespace = namespace
        self.version = version

    def open_zip(self):
        self.zip_file = TemporaryFile()
        self.zip = ZipFile(self.zip_file, 'w')

    def install_package(self):
        self.open_zip()
        if not self.version:
            raise ValueError('You must provide a version to install a package')

        package_xml = PACKAGE_XML % self.namespace
        #package_xml = package_xml.encode('utf-8')
        self.zip.writestr('package.xml', package_xml)

        installed_package = INSTALLED_PACKAGE % self.version
        #installed_package.encode('utf-8')
        self.zip.writestr('installedPackages/%s.installedPackage' % self.namespace, installed_package)

        return self.encode_zip()

    def uninstall_package(self):
        self.open_zip()
        self.zip.writestr('package.xml', EMPTY_PACKAGE_XML)
        self.zip.writestr('destructiveChanges.xml', PACKAGE_XML % self.namespace)
        return self.encode_zip()
        
    def encode_zip(self):
        self.zip.close()
        self.zip_file.seek(0)
        return b64encode(self.zip_file.read())
Code Example #8
File: new_backup.py Project: foomango/myfalcon
def backup_dir(key, data_node, directory):
  # Stream a tar of the remote directory over ssh, compress it with lzma,
  # and spool into a temporary file before uploading to the S3 key.
  temp = TemporaryFile()
  archiver = Popen(["ssh", data_node, "tar", "c", directory], stdout=PIPE)
  compressor = Popen(["lzma", "-z", "-9"], stdin=archiver.stdout, stdout=temp)
  compressor.wait()
  temp.seek(0)
  key.set_contents_from_file(temp)
Code Example #9
File: test_read.py Project: ericgazoni/openpyxl
def test_get_xml_iter():
    #1 file object
    #2 stream (file-like)
    #3 string
    #4 zipfile
    from openpyxl.reader.worksheet import _get_xml_iter
    from tempfile import TemporaryFile
    FUT = _get_xml_iter
    s = ""
    stream = FUT(s)
    assert isinstance(stream, BytesIO), type(stream)

    u = unicode(s)
    stream = FUT(u)
    assert isinstance(stream, BytesIO), type(stream)

    f = TemporaryFile(mode='rb+', prefix='openpyxl.', suffix='.unpack.temp')
    stream = FUT(f)
    assert isinstance(stream, tempfile), type(stream)
    f.close()

    from zipfile import ZipFile
    t = TemporaryFile()
    z = ZipFile(t, mode="w")
    z.writestr("test", "whatever")
    stream = FUT(z.open("test"))
    assert hasattr(stream, "read")
    z.close()
Code Example #10
File: util.py Project: spilgames/disco
def dsorted(iterable, buffer_size=1e6, tempdir="."):
    from disco.compat import pickle_load, pickle_dump
    from heapq import merge
    from itertools import islice
    from tempfile import TemporaryFile

    def read(handle):
        while True:
            try:
                yield pickle_load(handle)
            except EOFError:
                return

    iterator = iter(iterable)
    subiters = []
    buffer_size = int(buffer_size)  # islice needs an integer bound; the default 1e6 is a float
    while True:
        buffer = sorted(islice(iterator, buffer_size))
        handle = TemporaryFile(dir=tempdir)
        for item in buffer:
            pickle_dump(item, handle, -1)
        handle.seek(0)
        subiters.append(read(handle))
        if len(buffer) < buffer_size:
            break
    return merge(*subiters)
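dsorted is an external merge sort: it sorts fixed-size chunks in memory, spills each sorted run to a TemporaryFile, and lazily merges the runs. A minimal usage sketch, assuming the disco helpers above are importable:

for record in dsorted(range(10**6, 0, -1), buffer_size=100000, tempdir='/tmp'):
    pass  # records arrive in ascending order without sorting 10**6 items in RAM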
Code Example #11
def set_sff_trimpoints_with_sfftools(
        sff_dir, technical_lengths, sffinfo_path='sffinfo', sfffile_path='sfffile',
        debug=False):
    """Set trimpoints to end of technical read for all SFF files in directory.

    This function essentially provides the reference implementation.
    It uses the official sfftools from Roche to process the SFF files.
    """
    if not (exists(sffinfo_path) or which(sffinfo_path)):
        raise ApplicationNotFoundError(
            'sffinfo executable not found. Is it installed and in your $PATH?')
    if not (exists(sfffile_path) or which(sfffile_path)):
        raise ApplicationNotFoundError(
            'sfffile executable not found. Is it installed and in your $PATH?')

    for lib_id, sff_fp in get_per_lib_sff_fps(sff_dir):
        try:
            readlength = technical_lengths[lib_id]
        except KeyError:
            continue

        sffinfo_args = [sffinfo_path, '-s', sff_fp]
        if debug:
            print "Running sffinfo command %s" % sffinfo_args
        sffinfo_output_file = TemporaryFile()
        check_call(sffinfo_args, stdout=sffinfo_output_file)
        sffinfo_output_file.seek(0)

        seqlengths = {}
        for line in sffinfo_output_file:
            if line.startswith('>'):
                fields = line[1:].split()
                seq_len = fields[1].split('=')[1]
                seqlengths[fields[0]] = seq_len

        trim_fp = sff_fp + '.trim'
        trim_file = open(trim_fp, 'w')
        for id_, length in seqlengths.items():
            curr_length = int(seqlengths[id_])
            # Sfftools use 1-based index
            left_trim = readlength + 1
            # Key sequence not included in FASTA length
            right_trim = curr_length + 4
            if curr_length > left_trim:
                trim_file.write(
                    "%s\t%s\t%s\n" % (id_, left_trim, right_trim))
            else:
                stderr.write(
                    'Rejected read %s with trim points %s and %s (orig '
                    'length %s)' % (id_, left_trim, curr_length, length))
        trim_file.close()

        trimmed_sff_fp = sff_fp + '.trimmed'
        sfffile_args = [
            sfffile_path, '-t', trim_fp, '-o', trimmed_sff_fp, sff_fp]
        if debug:
            print "Running sfffile command:", sfffile_args
        check_call(sfffile_args, stdout=open(devnull, 'w'))
        remove(sff_fp)
        rename(trimmed_sff_fp, sff_fp)
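A hedged invocation sketch; the directory and per-library technical read length below are illustrative:

technical_lengths = {'lib1': 12}  # hypothetical technical read length per library
set_sff_trimpoints_with_sfftools('/data/sff', technical_lengths, debug=True)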
Code Example #12
File: allPythonContent.py Project: Mondego/pyreco
  def test_one_key_per_block_writer(self):
    # 2 pointers and a 1-byte null-terminated string = 10 bytes per block
    stream = TemporaryFile()
    
    i = IndexWriter(stream, block_size=10, terminator='\0')
    i.add(0, 'b')
    eq_(len(i.indexes), 1)
    
    i.add(0, 'c')
    eq_(len(i.indexes), 2)
    i.finish()


    stream.seek(0)
    packet = stream.read()
    eq_(len(packet), 30)
    

    root_block = packet[:10]
    eq_(root_block, '\x01\x00\x00\x00c\x00\x02\x00\x00\x00')
    
    block_1 = packet[10:20]
    eq_(block_1, '\x03\x00\x00\x00b\x00\x04\x00\x00\x00')
    
    block_2 = packet[20:]
    eq_(block_2, '\x04\x00\x00\x00c\x00\x05\x00\x00\x00')
Code Example #13
def importXML(jar, file, clue=""):
    from OFS.XMLExportImport import save_record, save_zopedata, start_zopedata
    from tempfile import TemporaryFile
    import xml.parsers.expat

    if type(file) is str:
        file = open(file, "rb")
    outfile = TemporaryFile()
    data = file.read()
    F = xmlPickler()
    F.end_handlers["record"] = save_record
    F.end_handlers["ZopeData"] = save_zopedata
    F.start_handlers["ZopeData"] = start_zopedata
    F.binary = 1
    F.file = outfile
    # Our BTs XML files don't declare encoding but have accented chars in them
    # So we have to declare an encoding but not use unicode, so the unpickler
    # can deal with the utf-8 strings directly
    p = xml.parsers.expat.ParserCreate("utf-8")
    p.returns_unicode = False

    p.CharacterDataHandler = F.handle_data
    p.StartElementHandler = F.unknown_starttag
    p.EndElementHandler = F.unknown_endtag
    r = p.Parse(data)
    outfile.seek(0)
    return jar.importFile(outfile, clue)
Code Example #14
File: s3.py Project: klahnakoski/MoTreeherder
    def write_lines(self, key, lines):
        self._verify_key_format(key)
        storage = self.bucket.new_key(key + ".json.gz")

        buff = TemporaryFile()
        archive = gzip.GzipFile(fileobj=buff, mode='w')
        count = 0
        for l in lines:
            if hasattr(l, "__iter__"):
                for ll in l:
                    archive.write(ll.encode("utf8"))
                    archive.write(b"\n")
                    count += 1
            else:
                archive.write(l.encode("utf8"))
                archive.write(b"\n")
                count += 1
        archive.close()
        file_length = buff.tell()

        retry = 3
        while retry:
            try:
                with Timer("Sending {{count}} lines in {{file_length|comma}} bytes", {"file_length": file_length, "count": count}, debug=self.settings.debug):
                    buff.seek(0)
                    storage.set_contents_from_file(buff)
                break
            except Exception, e:
                Log.warning("could not push data to s3", cause=e)
                retry -= 1
Code Example #15
File: mapreduce.py Project: MaNDRaXe/PyBabe
def sort_diskbased(stream, field, nsize=100000):
    buf = []
    files = []
    count = 0
    t = None

    def iter_on_file(f):
        try:
            while True:
                (key, v) = cPickle.load(f)
                yield (key, t._make(v))
        except EOFError:
            f.close()
    for elt in stream:
        if isinstance(elt, StreamHeader):
            t = elt.t
            yield elt
        elif isinstance(elt, StreamFooter):
            # Sort the in-memory tail on the same key as the spilled runs so
            # heapq.merge receives consistently ordered iterables.
            buf.sort(key=lambda obj: getattr(obj, field))
            iterables = [iter_on_file(f) for f in files] + [itertools.imap(lambda obj: (getattr(obj, field), obj), buf)]
            for (k, row) in heapq.merge(*iterables):
                yield row
            yield elt
        else:
            buf.append(elt)
            count = count + 1
            if count % nsize == 0:
                buf.sort(key=lambda obj: getattr(obj, field))
                f = TemporaryFile()
                for item in buf:
                    cPickle.dump((getattr(item, field), list(item)), f, cPickle.HIGHEST_PROTOCOL)
                f.flush()
                files.append(f)
                del buf[:]
Code Example #16
File: numpyserializer.py Project: mqtlam/caffe-tools
	def numpy_to_string(array):
		"""Convert numpy array into human-readable string.
		
		Good for passing to other programs.

		Notes:
			human-readable string example:
				1 2 3
				4 5 6
			is a string for the following array:
				[[1,2,3]
				 [4,5,6]]

		Args:
			array (numpy): array to convert to human-readable string

		Returns:
			human-readable string of array

		"""
		f = TemporaryFile()
		np.savetxt(f, array, fmt='%.8g')
		f.seek(0)
		string = f.read()
		return string
Code Example #17
File: numpyserializer.py Project: mqtlam/caffe-tools
	def string_to_numpy(string):
		"""Convert human-readable string into numpy array.
		
		Note:
			loads as floats even if stored as ints. 
			
			human-readable string example:
				1 2 3
				4 5 6
			is a string for the following array:
				[[1,2,3]
				 [4,5,6]]
		
		Args:
			string (string): human-readable string to convert to numpy array

		Returns:
			numpy array

		"""
		f = TemporaryFile()
		f.write(string)
		f.seek(0)
		array = np.loadtxt(f)
		return array
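A round-trip sketch for the two converters above, assuming numpy is imported as np and that they are exposed as static methods of a class (the name NumpySerializer is hypothetical):

a = np.arange(6).reshape(2, 3)
s = NumpySerializer.numpy_to_string(a)   # "0 1 2\n3 4 5\n" (bytes on Python 3)
b = NumpySerializer.string_to_numpy(s)   # loads back as floats
assert np.allclose(a, b)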
Code Example #18
File: git.py Project: NickeZ/hdl-make
    def check_commit_id(path):
        cur_dir = os.getcwd()
        commit = None
        stderr = TemporaryFile()
        try:
            os.chdir(path)
            git_cmd = 'git log -1 --format="%H" | cut -c1-32'
            git_out = Popen(git_cmd,
                            shell=True,
                            stdin=PIPE,
                            stdout=PIPE,
                            stderr=stderr,
                            close_fds=True)
            errmsg = stderr.readlines()
            if errmsg:
                logging.debug("git error message (in %s): %s" % (path, '\n'.join(errmsg)))

            try:
                commit = git_out.stdout.readlines()[0].strip()
            except IndexError:
                pass
        finally:
            os.chdir(cur_dir)
            stderr.close()
        return commit
Code Example #19
 def test_read_subprocess_output(self):
     output_file = TemporaryFile('w+')
     cmd = ['ls', '/']
     output, return_code = _read_subprocess_output(cmd, output_file)
     output_file.close()
     self.assertTrue('tmp' in output, '{} not in {}'.format('tmp', output))
     self.assertEqual(0, return_code)
Code Example #20
File: __init__.py Project: hanul93/pyhwp
def convert_hwp5file_into_odtpkg(hwp5file):
    from tempfile import TemporaryFile
    tmpfile = TemporaryFile()
    import os
    tmpfile2 = os.fdopen(os.dup(tmpfile.fileno()), 'r')

    from zipfile import ZipFile
    zf = ZipFile(tmpfile, 'w')
    from hwp5.hwp5odt import ODTPackage
    odtpkg = ODTPackage(zf)
    try:
        from hwp5.hwp5odt import Converter
        import hwp5.plat

        if haveXSLTTransformer():
            xslt = xslt_with_libreoffice
        else:
            # we use default xslt
            xslt = hwp5.plat.get_xslt()

        # convert without RelaxNG validation
        convert = Converter(xslt)

        # Embed images: see #32 - https://github.com/mete0r/pyhwp/issues/32
        convert(hwp5file, odtpkg, embedimage=True)
    finally:
        odtpkg.close()

    tmpfile2.seek(0)
    odtpkg_stream = InputStreamFromFileLike(tmpfile2)
    odtpkg_storage = StorageFromInputStream(odtpkg_stream)
    return odtpkg_storage
Code Example #21
File: collectible.py Project: mgax/hoover
 def _open(self):
     tmp = TemporaryFile()
     resp = requests.get(self.metadata['url'], stream=True)
     # Stream the body in 256 KiB chunks so large downloads never
     # have to be held in memory at once.
     for chunk in resp.iter_content(256*1024):
         tmp.write(chunk)
     tmp.seek(0)
     return tmp
Code Example #22
File: email.py Project: eleyine/QFMS
    def generate_pdf_ticket(registration=None, context=None, encoding='utf-8'):
        import ho.pisa as pisa
        import cStringIO as StringIO
        from django.utils.six import BytesIO

        if not registration and not context:
            raise Http404(_("Invalid arguments"))

        if not context:
            d = ConfirmationEmailView.get_extra_context(registration)
            context = Context(d)
        template = loader.get_template('registration/ticket.html')
        html  = template.render(context)

        if not registration:
            registration = context['r']

        result = StringIO.StringIO()
        pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("ISO-8859-1")), result)
        result = result.getvalue()

        try:
            file = TemporaryFile()
            file.write(result)
            registration.ticket_file = File(file)
            registration.save()
            file.close()
        except Exception, e:
            charge = registration.charge
            if charge:
                charge.save_server_message(
                    ['Failed while saving ticket file'], exception=e)
Code Example #23
    def process_response(self, response):
        # Parse the metadata zip file from the response
        zipstr = parseString(response.content).getElementsByTagName('zipFile')
        if zipstr:
            zipstr = zipstr[0].firstChild.nodeValue
        else:
            return self.packages
        zipfp = TemporaryFile()
        zipfp.write(base64.b64decode(zipstr))
        zipfile = ZipFile(zipfp, 'r')
    
        packages = {}
    
        # Loop through all files in the zip skipping anything other than InstalledPackages
        for path in zipfile.namelist():
            if not path.endswith('.installedPackage'):
                continue
            namespace = path.split('/')[-1].split('.')[0]
            version = parseString(zipfile.open(path).read()).getElementsByTagName('versionNumber')
            if version:
                version = version[0].firstChild.nodeValue
    
            packages[namespace] = version

        self.packages = packages
        return self.packages
Code Example #24
File: ia_upload.py Project: digikeri/internetarchive
def main(argv):
    args = docopt(__doc__, argv=argv)

    headers = get_args_dict(args['--header'])
    if args['--size-hint']:
        headers['x-archive-size-hint'] = args['--size-hint']

    # Upload keyword arguments.
    upload_kwargs = dict(
        metadata=get_args_dict(args['--metadata']),
        headers=headers,
        debug=args['--debug'],
        queue_derive=True if args['--no-derive'] is False else False,
        ignore_preexisting_bucket=args['--ignore-bucket'],
        checksum=args['--checksum'],
        verbose=True if args['--quiet'] is False else False,
        retries=int(args['--retries']) if args['--retries'] else 0,
        retries_sleep=int(args['--sleep']),
        delete=args['--delete'],
    )

    if args['<file>'] == ['-'] and not args['-']:
        sys.stderr.write('--remote-name is required when uploading from stdin.\n')
        call(['ia', 'upload', '--help'])
        sys.exit(1)

    # Upload from stdin.
    if args['-']:
        local_file = TemporaryFile()
        local_file.write(sys.stdin.read())
        local_file.seek(0)
        _upload_files(args, args['<identifier>'], local_file, upload_kwargs)

    # Bulk upload using spreadsheet.
    elif args['--spreadsheet']:
        # Use the same session for each upload request.
        session = ArchiveSession()

        spreadsheet = csv.DictReader(open(args['--spreadsheet'], 'rU'))
        prev_identifier = None
        for row in spreadsheet:
            local_file = row['file']
            identifier = row['identifier']
            del row['file']
            del row['identifier']
            if (not identifier) and (prev_identifier):
                identifier = prev_identifier
            # TODO: Clean up how indexed metadata items are coerced
            # into metadata.
            md_args = ['{0}:{1}'.format(k.lower(), v) for (k, v) in row.items() if v]
            metadata = get_args_dict(md_args)
            upload_kwargs['metadata'].update(metadata)
            _upload_files(args, identifier, local_file, upload_kwargs, prev_identifier,
                          session)
            prev_identifier = identifier

    # Upload files.
    else:
        local_file = args['<file>']
        _upload_files(args, args['<identifier>'], local_file, upload_kwargs)
Code Example #25
def run_cmd(options, jenkins):
    """Run the jshint command using options.

    Run the jshint command using options and return the output.

    :param options: Options received by the code_analysis_jshint function.
    :param jenkins: It is true when the jenkins output is turned on.

    """
    # cmd is a sequence of program arguments
    # first argument is child program
    paths = options['directory'].split('\n')
    cmd = [
        options['jshint-bin'],
        '--verbose',
        '--exclude={0}'.format(options['jshint-exclude'] or ' ')] + paths
    try:
        if jenkins:
            cmd.append('--reporter=jslint')
            output_file_name = os.path.join(options['location'], 'jshint.xml')
            output_file = open(output_file_name, 'w+')
        else:
            output_file = TemporaryFile('w+')

        # Wrapper to subprocess.Popen
        try:
            # Return code is not used for jshint.
            output = read_subprocess_output(cmd, output_file)[0]
            return output
        except OSError:
            log('skip')
            message = 'Command: {0}. Outputfile: {1}'.format(cmd, output_file)
            raise CmdError(message)
    finally:
        output_file.close()
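A minimal sketch of the options mapping run_cmd expects; every path below is illustrative:

options = {
    'directory': '/path/to/project/js',
    'jshint-bin': 'jshint',
    'jshint-exclude': 'node_modules',
    'location': '/tmp/reports',
}
output = run_cmd(options, jenkins=False)  # jshint's report as a string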
Code Example #26
    def build(self):
        data = []
        datapath = self.home.joinpath('data.xml')
        dom = minidom.parse(datapath.absolute().as_posix())
        index = 0
        for page in dom.getElementsByTagName('page'):
            page_data = self.parse_page(page)
            page_data['page.index'] = index
            data.append(page_data)
            index += 1

        data_loader = """
(function initData(w){{
w.Sectioner = new Object();
w.Sectioner.pages = {};
Object.freeze(w.Sectioner.pages);
}})(window);
        """.format(json.dumps(data, indent=2)).encode('UTF-8')

        data_js = TemporaryFile()
        data_js.write(data_loader)
        self.compiler.add_file(data_js, 'data.js')

        for asset in dom.getElementsByTagName('asset'):
            self.parse_asset(asset)

        return data
Code Example #27
File: multipart.py Project: aventurella/crazy-horse
    def read_file(self, data):
        temp_file = TemporaryFile(mode="w+b")

        if "content-length" in self.current_headers:
            temp_file.write(data.read(self.current_headers["content-length"]))
        else:
            bytes = data.readline()

            while not bytes[-2:] == "\r\n":
                temp_file.write(bytes)
                bytes = data.readline()
            
            temp_file.write(bytes.rstrip())
        
        filesize     = temp_file.tell()

        if filesize == 0:
            self.read_boundry(data)
            return

        key          = self.current_headers["content-disposition"]["name"]
        filename     = self.current_headers["content-disposition"].get("filename", "")
        content_type = self.current_headers["content-type"]
        
        if key not in self.files:
            self.files[key] = []

        temp_file.seek(0)
        self.files[key].append({"filename":filename, "filesize":filesize, "content-type":content_type, "data":temp_file})
        
        self.read_boundry(data)
Code Example #28
def TempFile(infile=None):
    "Create a suitable temporary file"
    outfile = TemporaryFile()
    if infile and hasattr(infile, 'read'):
        outfile.writelines(infile)
        outfile.seek(0)
    return outfile
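Usage sketch: seed the temp file from any readable object, an in-memory buffer here:

from io import BytesIO
tmp = TempFile(BytesIO(b'alpha\nbeta\n'))
assert tmp.read() == b'alpha\nbeta\n'  # TempFile already rewound it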
Code Example #29
def testFile():
    """Creates and returns a test file that you
    can muck around with"""
    file_ = TemporaryFile()
    file_.write(TEST_TEXT)
    file_.seek(0)
    return file_
Code Example #30
File: __init__.py Project: fdouetteau/PyMapReduce
 def run_reduce(self):
     self.stopped_received = 0
     self.merged_files = []
     merged_iterator = None
     while True:
         # Iterate and merge files until all jobs are processed
         get_next = self.get_next_file()
         files = get_next
         # itertools.islice(get_next, self.reduce_max_files)
         all_files = [file for file in files]
         iterables = [self.iter_on_file(file) for file in all_files]
         merged_iterator = heapq.merge(*iterables)
         if self.stopped_received < self.numprocs:
             if self.debug:
                 debug_print("Performing intermediate merge on %u  files" % len(iterables))
             f = TemporaryFile()
             self.merged_files.append(f)
             for m in merged_iterator:
                 cPickle.dump(m, f, cPickle.HIGHEST_PROTOCOL)
             f.seek(0)
             f.flush()
         else:
             break
     if len(self.merged_files) > 0:
         if self.debug:
             debug_print("Final merge")
         # Final merge if required
         merged_iterator = heapq.merge(
             *([self.iter_on_file(stream) for stream in self.merged_files] + [merged_iterator])
         )
     if self.debug:
         debug_print("Reduce loop")
     result = self.reduce_loop(merged_iterator)
     return result
Code Example #31
File: _dbg.py Project: Quansight/pyflyby-old
def remote_print_stack(pid, output=1):
    """
    Tell a target process to print a stack trace.

    This currently only handles the main thread.
    TODO: handle multiple threads.

    @param pid:
      PID of target process.
    @type output:
      C{int}, C{file}, or C{str}
    @param output:
      Output file descriptor.
    """
    # Interpret C{output} argument as a file-like object, file descriptor, or
    # filename.
    if hasattr(output, 'write'):  # file-like object
        output_fh = output
        try:
            output.flush()
        except Exception:
            pass
        try:
            output_fd = output.fileno()
        except Exception:
            output_fd = None
        try:
            output_fn = Filename(output.name)
        except Exception:
            pass
    elif isinstance(output, int):
        output_fh = None
        output_fn = None
        output_fd = output
    elif isinstance(output, (str, Filename)):
        output_fh = None
        output_fn = Filename(output)
        output_fd = None
    else:
        raise TypeError(
            "remote_print_stack(): expected file/str/int; got %s" %
            (type(output).__name__, ))
    temp_file = None
    remote_fn = output_fn
    if remote_fn is None and output_fd is not None:
        remote_fn = Filename("/proc/%d/fd/%d" % (os.getpid(), output_fd))
    # Figure out whether the target process will be able to open output_fn for
    # writing.  Since the target process would need to be running as the same
    # user as this process for us to be able to attach a debugger, we can
    # simply check whether we ourselves can open the file.  Typically output
    # will be fd 1 and we will have access to write to it.  However, if we're
    # sudoed, we won't be able to re-open it via the proc symlink, even though
    # we already currently have it open.  Another case is C{output} is a
    # file-like object that isn't a real file, e.g. a StringO.  In each case
    # we we don't have a usable filename for the remote process yet.  To
    # address these situations, we create a temporary file for the remote
    # process to write to.
    if remote_fn is None or not remote_fn.iswritable:
        if not output_fh or output_fd:
            assert remote_fn is not None
            raise OSError(errno.EACCES, "Can't write to %s" % output_fn)
        # We can still use the /proc/$pid/fd approach with an unnamed temp
        # file.  If it turns out there are situations where that doesn't work,
        # we can switch to using a NamedTemporaryFile.
        from tempfile import TemporaryFile
        temp_file = TemporaryFile()
        remote_fn = Filename("/proc/%d/fd/%d" %
                             (os.getpid(), temp_file.fileno()))
        assert remote_fn.iswritable
    # *** Do the code injection ***
    _remote_print_stack_to_file(pid, remote_fn)
    # Copy from temp file to the requested output.
    if temp_file is not None:
        data = temp_file.read()
        temp_file.close()
        if output_fh is not None:
            output_fh.write(data)
            output_fh.flush()
        elif output_fd is not None:
            with os.fdopen(output_fd, 'w') as f:
                f.write(data)
        else:
            raise AssertionError("unreachable")
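Hedged usage sketches; the PID and the output path are illustrative:

remote_print_stack(1234)                    # stack trace to our stdout (fd 1)
remote_print_stack(1234, '/tmp/stack.txt')  # or to a named file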
Code Example #32
    def transfer_yaml():
        print(" * Transferring yml")
        upload_folder = os.path.join(app.root_path,
                                     app.config['UPLOAD_FOLDER'])
        if request.method == 'GET':
            tarfile_backend = TemporaryFile(mode='wb+')
            yamlfile = TemporaryFile(mode='wb+')
            tarball = tarfile.open(fileobj=tarfile_backend, mode='w')
            visible_only = request.args.get('visibleOnly',
                                            default=False,
                                            type=bool)
            remove_flags = request.args.get('removeFlags',
                                            default=False,
                                            type=bool)

            yamlfile.write(
                bytes(
                    export_challenges(out_file='export.yaml',
                                      dst_attachments='export.d',
                                      src_attachments=upload_folder,
                                      visible_only=visible_only,
                                      remove_flags=remove_flags,
                                      tarfile=tarball), "UTF-8"))

            tarinfo = tarfile.TarInfo('export.yaml')
            tarinfo.size = yamlfile.tell()
            yamlfile.seek(0)
            tarball.addfile(tarinfo, yamlfile)
            tarball.close()
            yamlfile.close()

            gzipfile_backend = TemporaryFile(mode='wb+')
            gzipfile = GzipFile(fileobj=gzipfile_backend, mode='wb')

            tarfile_backend.seek(0)
            shutil.copyfileobj(tarfile_backend, gzipfile)

            tarfile_backend.close()
            gzipfile.close()
            gzipfile_backend.seek(0)
            return send_file(gzipfile_backend,
                             as_attachment=True,
                             attachment_filename='export.tar.gz')

        if request.method == 'POST':
            if 'file' not in request.files:
                abort(400)

            file = request.files['file']

            readmode = 'r:gz'
            if file.filename.endswith('.tar'):
                readmode = 'r'
            if file.filename.endswith('.bz2'):
                readmode = 'r:bz2'

            tempdir = mkdtemp()
            try:
                archive = tarfile.open(fileobj=file.stream, mode=readmode)

                if 'export.yaml' not in archive.getnames():
                    shutil.rmtree(tempdir)
                    abort(400)

                # Check for attempts to escape to higher dirs
                for member in archive.getmembers():
                    memberpath = os.path.normpath(member.name)
                    if memberpath.startswith('/') or '..' in memberpath.split(
                            '/'):
                        shutil.rmtree(tempdir)
                        abort(400)

                    if member.linkname:
                        linkpath = os.path.normpath(member.linkname)
                        if linkpath.startswith('/') or '..' in linkpath.split(
                                '/'):
                            shutil.rmtree(tempdir)
                            abort(400)

                archive.extractall(path=tempdir)

            except tarfile.TarError:
                shutil.rmtree(tempdir)
                abort(400)

            in_file = os.path.join(tempdir, 'export.yaml')
            import_challenges(in_file, upload_folder, move=True)

            shutil.rmtree(tempdir)

            return '1'
Code Example #33
def _create_image(format, *a, **kw):
    buf = TemporaryFile()
    Image.new(*a, **kw).save(buf, format)
    buf.seek(0)
    return Image.open(buf)
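Usage sketch: the first argument is the save format, the rest are passed to Image.new (all values illustrative):

img = _create_image('PNG', 'RGB', (64, 64), color='red')
assert img.format == 'PNG'  # round-tripped through the in-memory temp file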
Code Example #34
File: build.py Project: zoglesby/xiphos
 def setUp(self):
     self.fnull = TemporaryFile()
Code Example #35
File: initdb.py Project: x0james/faraday
    def run(self, choose_password):
        """
             Main entry point that executes these steps:
                 * creates role in database.
                 * creates database.
                 * save new configuration on server.ini.
                 * creates tables.
        """
        try:
            config = ConfigParser()
            config.read(LOCAL_CONFIG_FILE)
            if not self._check_current_config(config):
                return
            faraday_path_conf = os.path.expanduser(CONST_FARADAY_HOME_PATH)
            # we use psql_log_filename for historical saving. we will ask faraday users this file.
            # current_psql_output is for checking psql command already known errors for each execution.
            psql_log_filename = os.path.join(faraday_path_conf, 'logs',
                                             'psql_log.log')
            current_psql_output = TemporaryFile()
            with open(psql_log_filename, 'a+') as psql_log_file:
                hostname = 'localhost'
                username, password, process_status = self._configure_new_postgres_user(
                    current_psql_output)
                current_psql_output.seek(0)
                psql_output = current_psql_output.read()
                # persist log in the faraday log psql_log.log
                psql_log_file.write(psql_output)
                self._check_psql_output(current_psql_output, process_status)

                if hostname.lower() in ['localhost', '127.0.0.1']:
                    database_name = 'faraday'
                    current_psql_output = TemporaryFile()
                    database_name, process_status = self._create_database(
                        database_name, username, current_psql_output)
                    current_psql_output.seek(0)
                    self._check_psql_output(current_psql_output,
                                            process_status)

            current_psql_output.close()
            conn_string = self._save_config(config, username, password,
                                            database_name, hostname)
            self._create_tables(conn_string)
            couchdb_config_present = server.config.couchdb
            if not (couchdb_config_present and couchdb_config_present.user
                    and couchdb_config_present.password):
                self._create_admin_user(conn_string, choose_password)
            else:
                print(
                    'Skipping new admin creation since couchdb configuration was found.'
                )
        except KeyboardInterrupt:
            current_psql_output.close()
            print('User cancelled.')
            sys.exit(1)
Code Example #36
def train(flow,
          trainX,
          valX,
          cond_train=None,
          cond_val=None,
          loss_f=None,
          post_training_f=None,
          post_validation_f=None,
          batch_size=32,
          optimizer=optim.Adam,
          optimizer_kwargs=dict(lr=1e-3, weight_decay=1e-3),
          n_epochs=int(1e6),
          patience=100,
          gradient_clipping=None):
    r"""Train Flow model with (optional) early stopping.

    Training can be interrupted safely with KeyboardInterrupt;
    the resulting model will be the best one found before the interruption.

    Args:
        flow (Flow): flow to train.
        
        trainX (torch.Tensor): training dataset.
        valX (torch.Tensor): validation dataset.

        cond_train (torch.Tensor): conditioning tensor for trainX.
            If None, non-conditional flow assumed.
        cond_val (torch.Tensor): conditioning tensor for valX.
            If None, non-conditional flow assumed.

        loss_f (func): function(batch, idx, cond=None) to use as loss. 
            If None, uses flow.nll(batch, cond=cond) instead.

            idx is an index tensor signaling which entries in trainX or valX
            (depending on whether flow.training is True) are contained in batch.
            cond is an optional keyword argument with the conditioning tensor,
            if the flow is conditional. Otherwise, it's just None 
            and should be ignored.
            Returns a tensor with the loss computed for each entry in the batch.
            
        
        batch_size (int or float): If float, ratio of trainX to use per batch.
            If int, batch size.
        optimizer (torch.optim.Optimizer): optimizer class to use.
        optimizer_kwargs (dict): kwargs to pass to the optimizer.

        n_epochs (int): maximum number of epochs for training.
        patience (int): maximum number of epochs with no improvement
            in validation loss before stopping. 
            To avoid using early stopping, set to 0.

    Returns:
        train_losses: list with entries (float(epoch), loss).
        val_losses: list with entries (epoch, loss).

    The results of this function can be passed to `plot_losses` directly.
    """

    assert isinstance(flow, Flow)
    assert flow.prior is not None, 'flow.prior is required'

    conditional = cond_train is not None or cond_val is not None
    if conditional:
        assert (cond_train is not None and cond_val is not None), \
            'If flow is conditional, pass cond_train and cond_val'
    else:
        cond = None  # let's just leave it as a None for later

    if isinstance(batch_size, float):
        assert 0. < batch_size and batch_size <= 1.
        batch_size = int(batch_size * len(trainX))

    optimizer = optimizer(flow.parameters(), **optimizer_kwargs)

    train_losses, val_losses = [], []

    val_loss = np.inf
    best_loss = np.inf
    best_epoch = 0
    best_model = None

    if loss_f is None:
        loss_f = lambda batch, idx, cond=None: flow.nll(batch, cond=cond)

    best_model = TemporaryFile()

    try:
        with tqdm(n_epochs, leave=True, position=0) as tq:
            for epoch in range(1, n_epochs + 1):
                # Train
                flow.train()
                X = trainX
                idx = torch.randperm(len(X), device=X.device)
                for n in range(0, len(X), batch_size):
                    if len(X) - n == 1: continue
                    subidx = idx[n:n + batch_size]
                    batch = X[subidx].to(flow.device)
                    if conditional:
                        cond = cond_train[subidx].to(flow.device)

                    loss = loss_f(batch, subidx, cond=cond).mean()

                    assert not torch.isnan(loss) and not torch.isinf(loss)

                    # Pytorch recipe: zero_grad - backward - step
                    optimizer.zero_grad()
                    loss.backward()

                    # Gradient clipping
                    if gradient_clipping is not None:
                        assert gradient_clipping > 0
                        nn.utils.clip_grad_norm_(flow.parameters(),
                                                 gradient_clipping)

                    optimizer.step()

                    train_losses.append((epoch + n / len(trainX), loss.item()))

                    tq.set_postfix(
                        OrderedDict(epoch_progress='%.3d%%' %
                                    (n / len(X) * 100),
                                    train_loss='%+.3e' % loss.item(),
                                    last_val_loss='%+.3e' % val_loss,
                                    best_epoch=best_epoch,
                                    best_loss='%+.3e' % best_loss))

                    if post_training_f is not None:
                        post_training_f(batch, subidx, cond=cond)

                # Validation
                flow.eval()
                X = valX
                idx = torch.randperm(len(X), device=X.device)
                with torch.no_grad():  # won't accumulate info about gradient
                    val_loss = 0.
                    for n in range(0, len(X), batch_size):
                        subidx = idx[n:n + batch_size]
                        batch = X[subidx].to(flow.device)
                        if conditional:
                            cond = cond_val[subidx].to(flow.device)

                        val_loss += (loss_f(batch, subidx, cond=cond) /
                                     len(X)).sum().item()

                    val_losses.append((epoch, val_loss))

                    if post_validation_f is not None:
                        post_validation_f()

                assert not np.isnan(val_loss)  # and not np.isinf(val_loss)

                # Early stopping
                if best_loss > val_loss:
                    best_loss = val_loss
                    best_epoch = epoch

                    best_model.seek(0)
                    torch.save(flow.state_dict(), best_model)

                tq.update()
                tq.set_postfix(
                    OrderedDict(epoch_progress='100%',
                                train_loss='%+.3e' % loss.item(),
                                last_val_loss='%+.3e' % val_loss,
                                best_epoch=best_epoch,
                                best_loss='%+.3e' % best_loss))

                if patience and epoch - best_epoch >= patience:
                    break

    except KeyboardInterrupt:
        print('Interrupted at epoch', epoch)
        pass  # halt training without losing everything

    # Load best model before exiting
    best_model.seek(0)
    flow.load_state_dict(torch.load(best_model))
    best_model.close()

    flow.eval()  # pass to eval mode before returning

    return train_losses, val_losses
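A minimal invocation sketch under the stated assumptions (a Flow with a prior, plus training and validation tensors):

train_losses, val_losses = train(flow, trainX, valX,
                                 batch_size=0.1,  # 10% of trainX per batch
                                 patience=50)     # stop after 50 stale epochs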
Code Example #37
File: recipe-577379.py Project: zlrs/code-1
class BI_RLE(object):
    def __init__(self, i, o):
        self.i = open(i)
        self.o = open(o, "w")
        self.tmp = TemporaryFile()

        if self.i.read(2) != 'BM':
            raise IOError, "Not BMP file"

        self.i.seek(10)
        of = self.i.read(4)  #offset to start image data
        self.offset = sum([ord(of[i]) << 8 * i for i in range(len(of))])

        self.i.seek(18)
        w = self.i.read(4)  #image width
        self.w = sum([ord(w[i]) << 8 * i for i in range(len(w))])

        h = self.i.read(4)  #image height
        self.h = sum([ord(h[i]) << 8 * i for i in range(len(h))])

        self.i.seek(28)
        b = self.i.read(2)  #channel:bit per pixel
        self.bpp = sum([ord(b[i]) << 8 * i for i in range(len(b))])

        if self.bpp != 4 and self.bpp != 8:
            raise IOError, "Not 4-Bit or 8-Bit BMP file"

        c = self.i.read(4)  #compression type
        self.comp = sum([ord(c[i]) << 8 * i for i in range(len(c))])

        if self.comp != 2 and self.comp != 1:
            raise IOError, "Not Compressed file"

        self.tPix = self.w * self.h
        self.rPix = 0
        self.lns = 1

        self.c = 0
        self.EORLED = False  #fix for EORLE

        self.i.seek(self.offset)
        self.enc = self.i.read()
        self.dec = ""
        self.buf = ""

    def Decode(self):
        mrk = {
            0: self.EOSL,
            1: self.EORLE,
            2: self.MOFF
        }  #funcs for RLE Data markers

        while ((self.lns * self.w) <= self.tPix):
            b = self.enc[self.c:self.c + 2]
            self.c += 2
            if len(b) != 2: break
            b0, b1 = ord(b[0]), ord(b[1])
            if b0 == 0:
                mrk.get(b1, self.UENCD)(b0, b1)
            else:
                self.DENCD(b0, b1)

    def HPIX(self, pixel):
        """ Half-Byte Packing for 4-Bit and Pixel Data Handler """
        if self.bpp == 4:
            if self.buf == "":
                self.buf = chr(pixel << 4)
            else:
                self.buf = chr(ord(self.buf) | pixel)
                self.tmp.write(self.buf)
                self.buf = ""
        else:
            self.tmp.write(chr(pixel))

    def EOSL(self, *arg):
        """ 00 00: End Of Scan Line """
        remain = self.w - self.rPix
        if not self.EORLED:
            self.rPix = 0
            self.lns += 1
        if remain == 0: remain = 2  #fix for EOSL
        for i in range(remain):
            self.HPIX(0x00)

    def MOFF(self, *arg):
        """ 00 02: Move Offset """
        mov = self.enc[self.c:self.c + 2]
        self.c += 2
        mov = ord(mov[0]) + ord(mov[1]) * self.w
        for i in range(mov):
            self.HPIX(0x00)
        self.rPix += mov
        self.lns += self.rPix // mov
        self.rPix %= mov

    def UENCD(self, *arg):
        """ 00 NN: Unencoded Data """
        p = arg[1]  #unencoded pixels data
        if self.bpp == 4:
            #read bytes with padding byte for 4 bit
            b = int(round(p / 2)) + (int(round(p / 2)) % 2 | p % 2)
        else:
            #read bytes with padding byte for 8 bit
            b = p + p % 2
        ue = self.enc[self.c:self.c + b]
        self.c += b
        delta = self.rPix + p
        for i in range(b):
            if self.rPix == delta: break
            if self.bpp == 4:
                for j in range(2):
                    if self.rPix == delta: break
                    self.HPIX((ord(ue[i]) & (0x0F << (4 * (
                        (j + 1) % 2)))) >> (4 * ((j + 1) % 2)))
                    self.rPix += 1
            else:
                self.HPIX(ord(ue[i]))
                self.rPix += 1

    def DENCD(self, *arg):
        """ NN PP: Decode Encoded Data """
        b0, b1 = arg[0], arg[1]  #piece, 2 pixels data
        for i in range(b0):
            if self.bpp == 4:
                self.HPIX((b1 & (0x0F << (4 *
                                          ((i + 1) % 2)))) >> (4 *
                                                               ((i + 1) % 2)))
            else:
                self.HPIX(b1)
            self.rPix += 1

    def EORLE(self, *arg):
        """ 00 01: End Of RLE Data, Writing Decoded File """
        self.EORLED = True
        self.EOSL()
        if not self.buf == "": self.tmp.write(self.buf)

        self.tmp.seek(0)
        self.dec = self.tmp.read()
        self.tmp.close()

        self.i.seek(0)
        self.o.write(self.i.read(2))  #'BM' signature

        fs = self.offset + len(
            self.dec)  #FileSize: (Header + Color Palette) + ImageData
        fsize = ""  #filesize string value
        for i in range(4):
            fsize += chr(
                (fs & (0xFF << 8 * i)) >> 8 * i)  #ordering as little-endian
        self.o.write(fsize)

        self.i.seek(6)
        self.o.write(self.i.read(24))  #writing 24-byte same data from 6 offset

        self.o.write('\x00\x00\x00\x00')  #compression-type: none

        imgdsize = ""  #image data size string value
        for i in range(4):
            imgdsize += chr((len(self.dec) & (0xFF << 8 * i)) >> 8 * i)
        self.o.write(imgdsize)

        self.i.seek(38)
        self.o.write(self.i.read(self.offset -
                                 38))  #writing left same data from 38

        self.o.write(self.dec)

        self.o.close()
        self.i.close()
Code Example #38
def process_unsteady_files(input_filename):
    try:
        Timezero, U, V, W = np.loadtxt(
            input_filename,
            dtype=float,
            delimiter=',',
            skiprows=1,
            usecols=(0, 1, 2, 3),
            unpack=True
        )  # np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
        total_rows = min(len(U), len(V), len(W), len(Timezero))
    except Exception:
        print("File not found, {}".format(input_filename))
        return

    M = int(input("Enter the value of k, i.e. # of Fourier Components : "))
    while (M % 2 != 1):
        print("The Value of k should be strictly odd")
        M = int(input("Enter the value of k, i.e. # of Fourier Components : "))
    output_filename = str(input_filename.split('.')[0]) + "_k_" + str(
        M) + "_" + "Component_" + str((M - 1) / 2) + ".xls"
    print(output_filename)
    U_avg, V_avg, W_avg = all_fourier_calculations(total_rows, U, V, W, M)

    U_prime = []
    V_prime = []
    W_prime = []
    U_prime_U_prime = []
    V_prime_V_prime = []
    W_prime_W_prime = []
    U_prime_V_prime = []
    V_prime_W_prime = []
    W_prime_U_prime = []

    for a, b, c, d, e, f in zip(U, U_avg, V, V_avg, W, W_avg):
        U_prime.append(float('{:01.3f}'.format(a - b)))
        V_prime.append(float('{:01.3f}'.format(c - d)))
        W_prime.append(float('{:01.3f}'.format(e - f)))
        U_prime_U_prime.append(float('{:01.3f}'.format((a - b) * (a - b))))
        V_prime_V_prime.append(float('{:01.3f}'.format((c - d) * (c - d))))
        W_prime_W_prime.append(float('{:01.3f}'.format((e - f) * (e - f))))
        U_prime_V_prime.append(float('{:01.3f}'.format((a - b) * (c - d))))
        V_prime_W_prime.append(float('{:01.3f}'.format((c - d) * (e - f))))
        W_prime_U_prime.append(float('{:01.3f}'.format((e - f) * (a - b))))

    U_prime_U_prime_Average, V_prime_V_prime_Average, W_prime_W_prime_Average = all_fourier_calculations(
        total_rows, U_prime_U_prime, V_prime_V_prime, W_prime_W_prime, M)
    U_prime_V_prime_Average, V_prime_W_prime_Average, W_prime_U_prime_Average = all_fourier_calculations(
        total_rows, U_prime_V_prime, V_prime_W_prime, W_prime_U_prime, M)

    TKE = []
    for a, b, c in zip(U_prime_U_prime_Average, V_prime_V_prime_Average,
                       W_prime_W_prime_Average):
        TKE.append(float('{:01.3f}'.format((a + b + c) / 2)))

    import xlwt
    from tempfile import TemporaryFile
    book = xlwt.Workbook()

    Timezero = Timezero.tolist()
    Timezero.insert(0, "Timezero")

    U = U.tolist()
    U.insert(0, "U")

    V = V.tolist()
    V.insert(0, "V")

    W = W.tolist()
    W.insert(0, "W")

    U_avg.insert(0, "U_avg")
    V_avg.insert(0, "V_avg")
    W_avg.insert(0, "W_avg")

    fig, axs = plt.subplots(2, 3, sharex=True, sharey=True)
    # z='red'
    # marker symbol
    axs[0, 0].scatter(Timezero[1:], U[1:], s=2, c='r', marker=">")
    axs[0, 0].set_title("RAW U Velocities")

    # marker from TeX
    axs[0, 1].scatter(Timezero[1:], V[1:], s=2, c='g', marker=r'$\alpha$')
    axs[0, 1].set_title("RAW V Velocities")

    # marker from path
    # verts = [[-1, -1], [1, -1], [1, 1], [-1, -1]]
    axs[0, 2].scatter(Timezero[1:], W[1:], s=2, c='b', marker=r'$\alpha$')
    axs[0, 2].set_title("RAW W Velocities")

    # regular polygon marker
    axs[1, 0].scatter(Timezero[1:], U_avg[1:], s=2, c='r', marker=(5, 0))
    axs[1, 0].set_title("Fourier Averaged \n U Velocities k= {}".format(M))

    # regular star marker
    axs[1, 1].scatter(Timezero[1:], V_avg[1:], s=2, c='g', marker=(5, 1))
    axs[1, 1].set_title("Fourier Averaged \n V Velocities k= {}".format(M))

    # regular asterisk marker
    axs[1, 2].scatter(Timezero[1:], W_avg[1:], s=2, c='b', marker=(5, 2))
    axs[1, 2].set_title("Fourier Averaged \n W Velocities k= {}".format(M))

    plt.tight_layout()
    plt.savefig(output_filename + ".jpg")
    # plt.show()
    plt.clf()

    U_prime.insert(0, "u'")
    V_prime.insert(0, "v'")
    W_prime.insert(0, "w'")
    U_prime_U_prime.insert(0, "u'u'")
    V_prime_V_prime.insert(0, "v'v'")
    W_prime_W_prime.insert(0, "w'w'")
    U_prime_V_prime.insert(0, "u'v'")
    V_prime_W_prime.insert(0, "v'w'")
    W_prime_U_prime.insert(0, "u'w'")
    U_prime_U_prime_Average.insert(0, "u'u'_Avg")
    V_prime_V_prime_Average.insert(0, "v'v'_Avg")
    W_prime_W_prime_Average.insert(0, "w'w'_Avg")
    U_prime_V_prime_Average.insert(0, "u'v'_Avg")
    V_prime_W_prime_Average.insert(0, "v'w'_Avg")
    W_prime_U_prime_Average.insert(0, "u'w'_Avg")
    TKE.insert(0, "TKE")

    sheet1 = book.add_sheet('Fourier Components')
    for i, e in enumerate(Timezero):
        sheet1.write(i, 0, e)

    for i, e in enumerate(U):
        sheet1.write(i, 1, e)

    for i, e in enumerate(V):
        sheet1.write(i, 2, e)

    for i, e in enumerate(W):
        sheet1.write(i, 3, e)

    for i, e in enumerate(U_avg):
        sheet1.write(i, 4, e)

    for i, e in enumerate(V_avg):
        sheet1.write(i, 5, e)

    for i, e in enumerate(W_avg):
        sheet1.write(i, 6, e)

    for i, e in enumerate(U_prime):
        sheet1.write(i, 7, e)

    for i, e in enumerate(V_prime):
        sheet1.write(i, 8, e)

    for i, e in enumerate(W_prime):
        sheet1.write(i, 9, e)

    for i, e in enumerate(U_prime_U_prime):
        sheet1.write(i, 10, e)

    for i, e in enumerate(V_prime_V_prime):
        sheet1.write(i, 11, e)

    for i, e in enumerate(W_prime_W_prime):
        sheet1.write(i, 12, e)

    for i, e in enumerate(U_prime_V_prime):
        sheet1.write(i, 13, e)

    for i, e in enumerate(V_prime_W_prime):
        sheet1.write(i, 14, e)

    for i, e in enumerate(W_prime_U_prime):
        sheet1.write(i, 15, e)

    for i, e in enumerate(U_prime_U_prime_Average):
        sheet1.write(i, 16, e)

    for i, e in enumerate(V_prime_V_prime_Average):
        sheet1.write(i, 17, e)

    for i, e in enumerate(W_prime_W_prime_Average):
        sheet1.write(i, 18, e)

    for i, e in enumerate(U_prime_V_prime_Average):
        sheet1.write(i, 19, e)

    for i, e in enumerate(V_prime_W_prime_Average):
        sheet1.write(i, 20, e)

    for i, e in enumerate(W_prime_U_prime_Average):
        sheet1.write(i, 21, e)

    for i, e in enumerate(TKE):
        sheet1.write(i, 22, e)

    book.save(output_filename)
    book.save(TemporaryFile())
Code Example #39
    def import_lang(self, cr, uid, ids, context=None):
        """
            Import Language
            @param cr: the current row, from the database cursor.
            @param uid: the current user’s ID for security checks.
            @param ids: the ID or list of IDs
            @param context: A standard dictionary
        """
        if context is None:
            context = {}
        import_data = self.browse(cr, uid, ids)[0]
        if import_data.overwrite:
            context.update(overwrite=True)
        fileobj = TemporaryFile('w+')
        fileobj.write(base64.decodestring(import_data.data))

        # now we determine the file format
        fileobj.seek(0)
        first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
        fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
        fileobj.seek(0)

        tools.trans_load_data(cr, fileobj, fileformat, import_data.code, lang_name=import_data.name, context=context)
        fileobj.close()
        return {}
Code Example #40
    def append(self, seg, crossfade=100):
        seg1, seg2 = AudioSegment._sync(self, seg)

        if not crossfade:
            return seg1._spawn(seg1._data + seg2._data)
        elif crossfade > len(self):
            raise ValueError(
                "Crossfade is longer than the original AudioSegment ({}ms > {}ms)"
                .format(crossfade, len(self)))
        elif crossfade > len(seg):
            raise ValueError(
                "Crossfade is longer than the appended AudioSegment ({}ms > {}ms)"
                .format(crossfade, len(seg)))

        xf = seg1[-crossfade:].fade(to_gain=-120, start=0, end=float('inf'))
        xf *= seg2[:crossfade].fade(from_gain=-120, start=0, end=float('inf'))

        output = TemporaryFile()

        output.write(seg1[:-crossfade]._data)
        output.write(xf._data)
        output.write(seg2[crossfade:]._data)

        output.seek(0)
        obj = seg1._spawn(data=output)
        output.close()
        return obj
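Usage sketch, assuming pydub-style AudioSegment instances (names illustrative):

combined = intro.append(verse, crossfade=100)  # 100 ms crossfade between segments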
Code Example #41
File: stream.py Project: ares57/ctf
 def __init__(self, data=""):
     self.stream = TemporaryFile(mode="w+b")
     self.stream.write(data)
Code Example #42
    def run(self, data, store, signal, context, **kwargs):
        """ The main run method of the Python task.

        Args:
            data (:class:`.MultiTaskData`): The data object that has been passed from the
                predecessor task.
            store (:class:`.DataStoreDocument`): The persistent data store object that allows the
                task to store data for access across the current workflow run.
            signal (TaskSignal): The signal object for tasks. It wraps the construction
                and sending of signals into easy to use methods.
            context (TaskContext): The context in which the tasks runs.

        Returns:
            Action (Action): An Action object containing the data that should be passed on
                to the next task and optionally a list of successor tasks that
                should be executed.
        """
        params = self.params.eval(data, store, exclude=['command'])

        capture_stdout = self._callback_stdout is not None or params.capture_stdout
        capture_stderr = self._callback_stderr is not None or params.capture_stderr

        stdout_file = TemporaryFile() if params.capture_stdout else None
        stderr_file = TemporaryFile() if params.capture_stderr else None

        stdout = PIPE if capture_stdout else None
        stderr = PIPE if capture_stderr else None

        # change the user or group under which the process should run
        if params.user is not None or params.group is not None:
            pre_exec = self._run_as(params.user, params.group)
        else:
            pre_exec = None

        # call the command
        proc = Popen(self.params.eval_single('command', data, store),
                     cwd=params.cwd,
                     shell=True,
                     env=params.env,
                     preexec_fn=pre_exec,
                     stdout=stdout,
                     stderr=stderr,
                     stdin=PIPE if params.stdin is not None else None)

        # if input is available, send it to the process
        if params.stdin is not None:
            proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding()))

        # send a notification that the process has been started
        try:
            if self._callback_process is not None:
                self._callback_process(proc.pid, data, store, signal, context)
        except (StopTask, AbortWorkflow):
            proc.terminate()
            raise

        # send the output handling to a thread
        if capture_stdout or capture_stderr:
            output_reader = BashTaskOutputReader(proc, stdout_file,
                                                 stderr_file,
                                                 self._callback_stdout,
                                                 self._callback_stderr,
                                                 params.refresh_time, data,
                                                 store, signal, context)
            output_reader.start()
        else:
            output_reader = None

        # wait for the process to complete and watch for a stop signal
        while proc.poll() is None or\
                (output_reader is not None and output_reader.is_alive()):
            sleep(params.refresh_time)
            if signal.is_stopped:
                proc.terminate()

        if output_reader is not None:
            output_reader.join()
            data = output_reader.data

            # if a stop or abort exception was raised, stop the bash process and re-raise
            if output_reader.exc_obj is not None:
                if proc.poll() is None:
                    proc.terminate()
                raise output_reader.exc_obj

        # send a notification that the process has completed
        if self._callback_end is not None:
            if stdout_file is not None:
                stdout_file.seek(0)
            if stderr_file is not None:
                stderr_file.seek(0)

            self._callback_end(proc.returncode, stdout_file, stderr_file, data,
                               store, signal, context)

        if stdout_file is not None:
            stdout_file.close()

        if stderr_file is not None:
            stderr_file.close()

        return Action(data)
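
The TemporaryFile-backed output capture used here is a general pattern; a
minimal standalone sketch (independent of this task framework):

from subprocess import Popen
from tempfile import TemporaryFile

with TemporaryFile() as out:
    proc = Popen('echo hello', shell=True, stdout=out)
    proc.wait()
    out.seek(0)        # rewind before reading what the child wrote
    print(out.read())  # b'hello\n'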
コード例 #43
ファイル: sqlite3Tool.py プロジェクト: y1024/Python
def create_excel_file():
    all_staffs_excel = Workbook(encoding='utf-8')
    sheet_one = all_staffs_excel.add_sheet('7月份出勤明细')  # "attendance details for July"
    all_staffs_excel.save('all_staffs.xls')
    all_staffs_excel.save(TemporaryFile())
コード例 #44
 def test_saved_parameters(self):
     with TemporaryFile() as f:
         flow.misc.savemat(f, out['parameters'])
         params = flow.misc.loadmat(f)
     _compare_dict(orig['parameters'], params)
コード例 #45
 def _open_zip(self):
     self.zip_file = TemporaryFile()
     self.zip = ZipFile(self.zip_file, 'w')
コード例 #46
ファイル: stream.py プロジェクト: ares57/ctf
class io:
    def __init__(self, data=""):
        self.stream = TemporaryFile(mode="w+b")
        self.stream.write(data)

    def __getitem__(self, key):
        self.stream.seek(key)
        return self.stream.read(1)

    def __setitem__(self, key, item):
        self.stream.seek(key)
        self.stream.write(item)

    def __str__(self):
        self.stream.seek(0)
        return self.stream.read()

    def __len__(self):
        self.stream.seek(0, 2)  # the stream length is the offset of its end
        return self.stream.tell()

    def save(self, path):
        with open(
                path,
                "w+",
        ) as f:
            f.write(str(self))
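
Illustrative use of this byte-addressable wrapper (Python 2, matching the
str-based API above; the path is a placeholder):

buf = io("hello")
buf[0] = "H"
print str(buf)       # "Hello"
buf.save("/tmp/out")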
コード例 #47
ファイル: subscriptions.py プロジェクト: cash2one/source
 def handle(self, *args, **options):
     from django.conf import settings
     from accounts.choices import COUNTRY_CHOICES
     from rent.models import Booking
     log.info('Starting daily insurance subscriptions batch')
     csv_file = TemporaryFile()
     latin1csv_file = codecs.EncodedFile(csv_file, 'utf-8', 'latin1', 'ignore')
     writer = csv.writer(latin1csv_file, delimiter='|')
     period = (date.today() - timedelta(days=100))
     for booking in Booking.objects.pending().filter(created_at__year=period.year, created_at__month=period.month, created_at__day=period.day):
         row = SortedDict()
         row['Numéro locataire'] = booking.borrower.pk
         row['Login locataire'] = booking.borrower.username
         row['Adresse email'] = booking.borrower.email
         phones = tuple(booking.borrower.phones.all()[:1])
         phone = phones[0] if phones else None
         row['Téléphone locataire'] = phone
         row['Portable locataire'] = phone
         row['Nom'] = smart_str(booking.borrower.last_name.replace("\n", " ").replace("\r", " "))
         row[u'Prénom'] = smart_str(booking.borrower.first_name.replace("\n", " ").replace("\r", " "))
         for address in booking.borrower.addresses.all()[:1]:
             row['Adresse 1'] = smart_str(address.address1.replace("\n", " ").replace("\r", " "))
             row['Adresse 2'] = smart_str(address.address2.replace("\n", " ").replace("\r", " ")) if address.address2 else None
             row['Code postal'] = address.zipcode.replace("\n", " ").replace("\r", " ")
             row['Ville'] = smart_str(address.city.replace("\n", " ").replace("\r", " "))
             row['Pays'] = COUNTRY_CHOICES[address.country]
             break
         else:
             row['Adresse 1'] = \
             row['Adresse 2'] = \
             row['Code postal'] = \
             row['Ville'] = \
             row['Pays'] = None
         row['Numéro propriétaire'] = smart_str(booking.owner.pk)
         row['Login propriétaire'] = smart_str(booking.owner.username)
         row['Adresse email propriétaire'] = booking.owner.email
         phones = tuple(booking.owner.phones.all()[:1])
         phone = phones[0] if phones else None
         row['Téléphone propriétaire'] = phone
         row['Portable propriétaire'] = phone
         row['Nom propriétaire'] = smart_str(booking.owner.last_name.replace("\n", " ").replace("\r", " "))
         row[u'Prénom propriétaire'] = smart_str(booking.owner.first_name.replace("\n", " ").replace("\r", " "))
         for address in booking.owner.addresses.all()[:1]:
             row['Adresse 1 propriétaire'] = smart_str(address.address1.replace("\n", " ").replace("\r", " "))
             row['Adresse 2 propriétaire'] = smart_str(address.address2.replace("\n", " ").replace("\r", " ") if address.address2 else None)
             row['Code postal propriétaire'] = address.zipcode.replace("\n", " ").replace("\r", " ")
             row['Ville propriétaire'] = smart_str(address.city.replace("\n", " ").replace("\r", " "))
             row['Pays propriétaire'] = COUNTRY_CHOICES[address.country]
             break
         else:
             row['Adresse 1 propriétaire'] = \
             row['Adresse 2 propriétaire'] = \
             row['Code postal propriétaire'] = \
             row['Ville propriétaire'] = \
             row['Pays propriétaire'] = None
         row['Numéro police'] = settings.POLICY_NUMBER
         row['Numéro partenaire'] = settings.PARTNER_NUMBER
         row['Numéro contrat'] = 500000 + booking.contract_id
         row['Date d\'effet de la location'] = booking.started_at.strftime("%Y%m%d")
         row[u'Numéro de commande'] = booking.uuid
         try:
             product = booking.product
             row['Type de produit'] = smart_str(product._get_category().name)
             row[u'Désignation'] = smart_str(product.description.replace("\n", " ").replace("\r", " "))
             row['Informations complémentaires produit'] = smart_str(product.summary.replace("\n", " ").replace("\r", " "))
         except ObjectDoesNotExist:
             row['Type de produit'] = \
             row[u'Désignation'] = \
             row['Informations complémentaires produit'] = None
         row['Prix de la location TTC'] = comma_separated(booking.total_amount)
         row['Montant de la Caution'] = comma_separated(booking.deposit_amount)
         row[u'Durée de garantie'] = (booking.ended_at - booking.started_at).days
         try:
             row[u'Prix de cession de l\'assurance HT'] = comma_separated(round(booking.insurance_fee, 2))
             row['Com. du partenaire'] = comma_separated(round(booking.insurance_commission, 2))
             row['Taxes assurance à 9%'] = comma_separated(round(booking.insurance_taxes, 2))
         except ObjectDoesNotExist:
             row[u'Prix de cession de l\'assurance HT'] = \
             row['Com. du partenaire'] = \
             row['Taxes assurance à 9%'] = None
         row['Cotisation TTC'] = comma_separated(round(booking.insurance_amount, 2))
         writer.writerow(row.values())
     latin1csv_file.seek(0)
     log.info('Uploading daily insurance subscriptions')
     ftp = FTP(settings.INSURANCE_FTP_HOST)
     ftp.login(settings.INSURANCE_FTP_USER, settings.INSURANCE_FTP_PASSWORD)
      # toggle FTP passive mode from settings (off unless INSURANCE_FTP_PASSIVE_MODE is set)
     ftp.set_pasv(getattr(settings, 'INSURANCE_FTP_PASSIVE_MODE', 0))
     if settings.INSURANCE_FTP_CWD:
         ftp.cwd(settings.INSURANCE_FTP_CWD)
     ftp.storlines("STOR subscriptions-eloue-%s-%s-%s.csv" % (period.year, period.month, period.day), latin1csv_file)
     ftp.quit()
     log.info('Finished daily insurance subscriptions batch')
コード例 #48
ファイル: sqlite3Tool.py プロジェクト: y1024/Python
                print 'did not return to the office after the trip ended'
                sheet_one.write(excel_row, all_staffs_sign_out_date,
                                go_to_out_date_end_time_end)
                sheet_one.write(excel_row,
                                all_staffs_sign_out_date_description, '外出')  # "out of office"
            else:
                print 'returned to the office after the trip'

                # if be_late(go_to_out_date_start_time):
                #     print 'left on the trip without the morning clock-in'
                # else:
                #     print 'clocked in in the morning before leaving on the trip'
                # if leave_early(go_to_out_date_end_time):
                #     print 'did not return to the office after the trip ended'
                # else:
                #     print 'returned to the office after the trip'
                # for row in rows:
                #     go_to_out_date_start_time = row[go_to_out_date_start_index]
                #     go_to_out_date_end_time = row[go_to_out_date_end_index]
                #     if be_late(go_to_out_date_start_time):
                #         print 'left on the trip without the morning clock-in'
                #     else:
                #         print 'clocked in in the morning before leaving on the trip'
                #     if leave_early(go_to_out_date_end_time):
                #         print 'did not return to the office after the trip ended'
                #     else:
                #         print 'returned to the office after the trip'

all_staffs_excel.save('all_staffs.xls')
all_staffs_excel.save(TemporaryFile())
コード例 #49
A sensor keeps collecting data; for every 1 GB gathered the data is analysed
and only the result is kept. Holding that scratch data in memory wastes RAM,
so store it in a temporary file (external storage) instead. A temporary file
needs no name and is deleted automatically once closed.

Solution:
TemporaryFile and NamedTemporaryFile in the tempfile module
'''
from tempfile import TemporaryFile, NamedTemporaryFile

# TemporaryFile(mode='w+b', bufsize=-1, suffix='', prefix='tmp',dir=None)



# Get a temporary file object; it is reachable only through f and has no entry in the filesystem
f = TemporaryFile()

# Put the temporary data into the temporary file
f.write(b'abcdef' * 100000)

# To read the data back, move the file pointer first
f.seek(0)

# Read as much as needed at a time
f.read(100)

#-------------------------
# NamedTemporaryFile(mode='w+b', bufsize=-1, suffix='', prefix='tmp',dir=None,delete=True)

# Created anew each time; the file is deleted automatically when the object is garbage-collected
ntf = NamedTemporaryFile()
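
# Follow-up sketch (not in the original snippet): the named variant has a
# real filesystem path, and delete=True (the default) removes it on close.
print(ntf.name)
ntf.close()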
コード例 #50
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# author : haymai
"""
你需要在程序执行时创建一个临时文件或目录,并希望使用完之后可以自动销毁
掉。
"""
from tempfile import NamedTemporaryFile, TemporaryDirectory
from tempfile import TemporaryFile

if __name__ == '__main__':
    with TemporaryFile('w+t') as f:
        # Read/write to the file
        f.write('Hello World\n')
        f.write('Testing\n')
        # Seek back to beginning and read the data
        f.seek(0)
        data = f.read()

    f = TemporaryFile('w+t')
    f.close()
    """
    在大多数 Unix 系统上,通过 TemporaryFile() 创建的文件都是匿名的,甚至连目
    录都没有。如果你想打破这个限制,可以使用 NamedTemporaryFile() 来代替。
    """

    with NamedTemporaryFile('w+t') as f:
        print('filename is:', f.name)
    """
    临时目录
    """
コード例 #51
 def testDumpCount(self):
     parser = ParserMock(DATA + DATA)
     self.assertEqual(2, _dump(TemporaryFile(), self.out, parser, False))
コード例 #52
    def import_lang(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        this = self.browse(cr, uid, ids[0])
        if this.overwrite:
            context = dict(context, overwrite=True)
        fileobj = TemporaryFile('w+')
        try:
            fileobj.write(base64.decodestring(this.data))

            # now we determine the file format
            fileobj.seek(0)
            first_line = fileobj.readline().strip().replace('"', '').replace(
                ' ', '')
            fileformat = 'csv' if first_line.endswith(
                "type,name,res_id,src,value") else 'po'
            fileobj.seek(0)

            tools.trans_load_data(cr,
                                  fileobj,
                                  fileformat,
                                  this.code,
                                  lang_name=this.name,
                                  context=context)
        finally:
            fileobj.close()
        return True
コード例 #53
ファイル: formparser.py プロジェクト: JnrnZEDb/NZBMegaSearch
def default_stream_factory(total_content_length, filename, content_type,
                           content_length=None):
    """The stream factory that is used per default."""
    if total_content_length > 1024 * 500:  # spool bodies larger than 500 KB to disk
        return TemporaryFile('wb+')
    return StringIO()
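
A quick illustration of the threshold (argument values are mine):

small = default_stream_factory(total_content_length=10 * 1024,
                               filename='a.txt', content_type='text/plain')
large = default_stream_factory(total_content_length=600 * 1024,
                               filename='b.bin', content_type='application/octet-stream')
# small is an in-memory StringIO; large is an unnamed on-disk TemporaryFile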
コード例 #54
    def __call__(self, orig, dest):
        old_inkscape = mpl._get_executable_info("inkscape").version < "1"
        terminator = b"\n>" if old_inkscape else b"> "
        if not hasattr(self, "_tmpdir"):
            self._tmpdir = TemporaryDirectory()
        if (not self._proc  # First run.
                or self._proc.poll() is not None):  # Inkscape terminated.
            env = {
                **os.environ,
                # If one passes e.g. a png file to Inkscape, it will try to
                # query the user for conversion options via a GUI (even with
                # `--without-gui`).  Unsetting `DISPLAY` prevents this (and
                # causes GTK to crash and Inkscape to terminate, but that'll
                # just be reported as a regular exception below).
                "DISPLAY":
                "",
                # Do not load any user options.
                "INKSCAPE_PROFILE_DIR":
                os.devnull,
            }
            # Old versions of Inkscape (e.g. 0.48.3.1) seem to sometimes
            # deadlock when stderr is redirected to a pipe, so we redirect it
            # to a temporary file instead.  This is not necessary anymore as of
            # Inkscape 0.92.1.
            stderr = TemporaryFile()
            self._proc = subprocess.Popen(
                ["inkscape", "--without-gui", "--shell"]
                if old_inkscape else ["inkscape", "--shell"],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=stderr,
                env=env,
                cwd=self._tmpdir.name)
            # Slight abuse, but makes shutdown handling easier.
            self._proc.stderr = stderr
            try:
                self._read_until(terminator)
            except _ConverterError as err:
                raise OSError("Failed to start Inkscape in interactive "
                              "mode") from err

        # Inkscape's shell mode does not support escaping metacharacters in the
        # filename ("\n", and ":;" for inkscape>=1).  Avoid any problems by
        # running from a temporary directory and using fixed filenames.
        inkscape_orig = Path(self._tmpdir.name, os.fsdecode(b"f.svg"))
        inkscape_dest = Path(self._tmpdir.name, os.fsdecode(b"f.png"))
        try:
            inkscape_orig.symlink_to(Path(orig).resolve())
        except OSError:
            shutil.copyfile(orig, inkscape_orig)
        self._proc.stdin.write(
            b"f.svg --export-png=f.png\n" if old_inkscape else
            b"file-open:f.svg;export-filename:f.png;export-do;file-close\n")
        self._proc.stdin.flush()
        try:
            self._read_until(terminator)
        except _ConverterError as err:
            # Inkscape's output is not localized but gtk's is, so the output
            # stream probably has a mixed encoding.  Using the filesystem
            # encoding should at least get the filenames right...
            self._proc.stderr.seek(0)
            raise ImageComparisonFailure(self._proc.stderr.read().decode(
                sys.getfilesystemencoding(), "replace")) from err
        os.remove(inkscape_orig)
        shutil.move(inkscape_dest, dest)
コード例 #55
def fftype(mol,
           rtfFile=None,
           prmFile=None,
           method='GAFF2',
           acCharges=None,
           tmpDir=None,
           netcharge=None):
    """
    Assign atom types and force field parameters for a given molecule.
    Additionally, atom masses and improper dihedrals are set.
    Optionally, atom charges can be set if `acCharges` is set (see below).

    The assignment can be done:
      1. For CHARMM CGenFF_2b6 with MATCH (method = 'CGenFF_2b6');
      2. For AMBER GAFF with antechamber (method = 'GAFF');
      3. For AMBER GAFF2 with antechamber (method = 'GAFF2');

    Parameters
    ----------
    mol : Molecule
        Molecule to use for the assignment
    rtfFile : str
        Path to a RTF file from which to read the topology
    prmFile : str
        Path to a PRM file from which to read the parameters
    method : str
        Atomtyping assignment method.
        Use :func:`fftype.listFftypemethods <htmd.parameterization.fftype.listFftypemethods>` to get a list of available
        methods.
        Default: :func:`fftype.defaultFftypemethod <htmd.parameterization.fftype.defaultFftypemethod>`
    acCharges : str
        Optionally assign charges with antechamber. Check `antechamber -L` for available options.
        Note: only works for GAFF and GAFF2.
    tmpDir: str
        Directory for temporary files. If None, a directory is created and
        deleted automatically.
    netcharge : float
        The net charge of the molecule.

    Returns
    -------
    prm : :class:`ParameterSet <parmed.parameters.ParameterSet>` object
        Returns a parmed ParameterSet object with the parameters.
    mol : :class:`Molecule <moleculekit.molecule.Molecule>` object
        The modified Molecule object with the matching atom types for the ParameterSet
    """

    import parmed

    if method not in fftypemethods:
        raise ValueError('Invalid method {}. Available methods {}'.format(
            method, ','.join(fftypemethods)))

    if method == 'CGenFF_2b6' and acCharges:
        raise ValueError('acCharges cannot be used with method CGenFF_2b6')

    if netcharge is None:
        netcharge = int(round(np.sum(mol.charge)))
        logger.warning(
            'Molecular charge is set to {} by adding up the atomic charges'.
            format(netcharge))

    if rtfFile and prmFile:

        from htmd.parameterization.readers import readRTF

        logger.info('Reading FF parameters from {} and {}'.format(
            rtfFile, prmFile))
        prm = parmed.charmm.CharmmParameterSet(rtfFile, prmFile)
        names, elements, atomtypes, charges, masses, impropers = readRTF(
            rtfFile)

    else:
        logger.info('Assigning atom types with {}'.format(method))

        renamed_mol = _canonicalizeAtomNames(mol)

        # Create a temporary directory
        with TemporaryDirectory() as tmpdir:

            # HACK to keep the files
            tmpdir = tmpdir if tmpDir is None else tmpDir
            logger.debug('Temporary directory: {}'.format(tmpdir))

            if method in ('GAFF', 'GAFF2'):

                from moleculekit.molecule import Molecule
                from htmd.parameterization.readers import readPREPI, readFRCMOD

                # Write the molecule to a file
                renamed_mol.write(os.path.join(tmpdir, 'mol.mol2'))

                atomtype = method.lower()

                # Set arguments
                cmd = [
                    'antechamber', '-at', atomtype, '-nc',
                    str(netcharge), '-fi', 'mol2', '-i', 'mol.mol2', '-fo',
                    'prepi', '-o', 'mol.prepi'
                ]
                if acCharges is not None:
                    cmd += ['-c', acCharges]

                # Run antechamber
                with TemporaryFile() as stream:
                    if subprocess.call(
                            cmd, cwd=tmpdir, stdout=stream,
                            stderr=stream) != 0:
                        raise RuntimeError('"antechamber" failed')
                    stream.seek(0)
                    for line in stream.readlines():
                        logger.debug(line)

                # Set arguments
                cmd = [
                    'parmchk2', '-f', 'prepi', '-s', atomtype, '-i',
                    'mol.prepi', '-o', 'mol.frcmod', '-a', 'Y'
                ]

                # Run parmchk2
                with TemporaryFile() as stream:
                    if subprocess.call(
                            cmd, cwd=tmpdir, stdout=stream,
                            stderr=stream) != 0:
                        raise RuntimeError('"parmchk2" failed')
                    stream.seek(0)
                    for line in stream.readlines():
                        logger.debug(line)

                # Check whether antechamber changed any atom names (and ask the user to fix them)
                acmol = Molecule(os.path.join(tmpdir, 'NEWPDB.PDB'),
                                 type='pdb')
                acmol.name = np.array([n.upper()
                                       for n in acmol.name]).astype(np.object)
                changed_mol_acmol = np.setdiff1d(renamed_mol.name, acmol.name)
                changed_acmol_mol = np.setdiff1d(acmol.name, renamed_mol.name)
                if len(changed_mol_acmol) != 0 or len(changed_acmol_mol) != 0:
                    raise RuntimeError(
                        'Initial atom names {} were changed by antechamber to {}. '
                        'This probably means that the start of the atom name does not match '
                        'element symbol. '
                        'Please check the molecule.'
                        ''.format(','.join(changed_mol_acmol),
                                  ','.join(changed_acmol_mol)))

                # Read the results
                prm = parmed.amber.AmberParameterSet(
                    os.path.join(tmpdir, 'mol.frcmod'))
                names, atomtypes, charges, impropers = readPREPI(
                    renamed_mol, os.path.join(tmpdir, 'mol.prepi'))
                masses, elements = readFRCMOD(
                    atomtypes, os.path.join(tmpdir, 'mol.frcmod'))

            elif method == 'CGenFF_2b6':

                from htmd.parameterization.readers import readRTF

                # Write the molecule to a file
                renamed_mol.write(os.path.join(tmpdir, 'mol.pdb'))

                # Set arguments
                cmd = [
                    'match-typer', '-charge',
                    str(netcharge), '-forcefield', 'top_all36_cgenff_new',
                    'mol.pdb'
                ]

                # Run match-typer
                with TemporaryFile() as stream:
                    if subprocess.call(
                            cmd, cwd=tmpdir, stdout=stream,
                            stderr=stream) != 0:
                        raise RuntimeError('"match-typer" failed')
                    stream.seek(0)
                    for line in stream.readlines():
                        logger.debug(line)

                prm = parmed.charmm.CharmmParameterSet(
                    os.path.join(tmpdir, 'mol.rtf'),
                    os.path.join(tmpdir, 'mol.prm'))
                names, elements, atomtypes, charges, masses, impropers = readRTF(
                    os.path.join(tmpdir, 'mol.rtf'))

            else:
                raise ValueError('Invalid method {}'.format(method))

        assert np.all(renamed_mol.name == names)

    assert np.all(mol.element == elements)

    mol = mol.copy()
    mol.atomtype = atomtypes
    mol.masses = masses
    mol.impropers = impropers
    if acCharges is not None:
        mol.charge = charges

    return prm, mol
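
A hypothetical call matching the documented signature (the input file and
charge method are placeholders, not from the source):

from moleculekit.molecule import Molecule

mol = Molecule('ligand.mol2')
prm, typed_mol = fftype(mol, method='GAFF2', acCharges='gas')
print(typed_mol.atomtype[:5])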
コード例 #56
        #cv2.imshow('figureSquare1', img)

        imgResize1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgResize = cv2.resize(imgResize1, (outWidth, outHeight),
                               interpolation=cv2.INTER_AREA)
        #imgResize = cv2.resize(img, (20, 20) , interpolation = cv2.INTER_AREA )

        cv2.waitKey(int(math.ceil(dt)))
        writerOut.write(imgResize)

    writerOut.release()

    cv2.destroyAllWindows()

    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    data = np.column_stack((tStimulus, theta))  # pair stimulus times with angles
    np.save(outfile, data)

print len(yPosPix)

path.append('/home/atilla/Documents/Test/Neural_Network2/Stimulus/Data/')
import dataAnalysis as datAn

filePath = '/home/atilla/Documents/Test/Neural_Network2/Stimulus/Data/'


def plotPixels(xFrameSpike, fps, xPosPix, yPosPix, tPix5):
    xTimeSpike = np.divide(xFrameSpike, fps)
    for i in range(np.size(xFrameSpike)):
コード例 #57
def run_demographics(payload_dict):
    logging.info(f"run_demographics:: {payload_dict}")

    metric_names_ref = {
        "vaccinations-by-vaccination-date": {
            "age-demographics": {
                "metric_name": "vaccinationsAgeDemographics",
                "main_metrics": ['areaType', 'areaCode', 'areaName', 'date', 'age']
            }
        },
        "cases-by-specimen-date": {
            "age-demographics": {
                "metric_name": "newCasesBySpecimenDateAgeDemographics",
                "base_metric": "newCasesBySpecimenDate",
                "db_payload_metric": "cases",
                "main_metrics": ['areaType', 'areaCode', 'areaName', 'date', 'age']
            }
        },
        "deaths28days-by-death-date": {
            "age-demographics": {
                "metric_name": "newDeaths28DaysByDeathDateAgeDemographics",
                "base_metric": "newDeaths28DaysByDeathDate",
                "db_payload_metric": "deaths",
                "main_metrics": ['areaType', 'areaCode', 'areaName', 'date', 'age']
            }
        },
        "first-episodes-by-specimen-date": {
            "age-demographics": {
                "metric_name": "newFirstEpisodesBySpecimenDateAgeDemographics",
                "base_metric": "newFirstEpisodesBySpecimenDate",
                "db_payload_metric": "cases",
                "main_metrics": ['areaType', 'areaCode', 'areaName', 'date', 'age']
            }
        },
        "reinfections-by-specimen-date": {
            "age-demographics": {
                "metric_name": "newReinfectionsBySpecimenDateAgeDemographics",
                "base_metric": "newReinfectionsBySpecimenDate",
                "db_payload_metric": "cases",
                "main_metrics": ['areaType', 'areaCode', 'areaName', 'date', 'age']
            }
        },
    }

    payload = RawDataPayload(**payload_dict["base"])
    category = payload_dict['category']
    subcategory = payload_dict['subcategory']
    area_type = payload_dict['area_type']
    area_code = payload_dict['area_code']
    date = payload_dict['date']

    metadata = metric_names_ref[category][subcategory]
    metric_name = metadata["metric_name"]

    kws = dict(
        container="pipeline",
        content_type="application/octet-stream",
        cache_control="no-cache, max-age=0, must-revalidate",
        compressed=False,
        tier='Cool'
    )

    # Retrieve data chunk
    with StorageClient(**kws, path=payload.data_path) as client, TemporaryFile() as fp:
        if not client.exists():
            raise RuntimeError(f"Blob not found: {payload.data_path}")

        client.download().readinto(fp)
        fp.seek(0)
        data = read_feather(fp)

    logging.info(f"\tLoaded and parsed population data")

    main_metrics = metadata["main_metrics"]
    metrics = data.columns[~data.columns.isin(main_metrics)]

    db_payload_metric = metadata.get("db_payload_metric")
    if db_payload_metric is not None:
        metrics = [db_payload_metric, "rollingSum", "rollingRate"]
        logging.info(metrics)

    result = (
        data
        .pipe(homogenise_demographics_dates)
        .set_index(main_metrics)
        .pipe(
            normalise_demographics_records,
            zero_filled=FILL_WITH_ZEROS,
            cumulative=START_WITH_ZERO
        )
        .pipe(
            metric_specific_processes,
            base_metric=metadata.get("base_metric"),
            db_payload_metric=db_payload_metric
        )
        .groupby(main_metrics[:-1])
        .apply(lambda x: x.loc[:, [main_metrics[-1], *metrics]].to_dict(orient="records"))
        .reset_index()
        .rename(columns={0: metric_name})
    )

    # Store chunk for deployment to DB
    result_path = f"daily_chunks/{category}/{subcategory}/{date}/{area_type}_{area_code}.ft"
    with TemporaryFile() as fp:
        result.reset_index(drop=True).to_feather(fp)
        fp.seek(0)

        with StorageClient(**kws, path=result_path) as cli:
            cli.upload(fp.read())

    response_payload = {
        "path": result_path,
        "area_code": area_code,
        "area_type": area_type,
        "date": date,
        "environment": payload.environment,
        "category": category,
        "subcategory": subcategory
    }

    return response_payload
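
The blob handling above reduces to a feather round trip through a
TemporaryFile; a standalone sketch (requires pandas with pyarrow,
StorageClient omitted):

import pandas as pd
from tempfile import TemporaryFile

df = pd.DataFrame({"date": ["2022-01-01"], "cases": [10]})
with TemporaryFile() as fp:
    df.to_feather(fp)
    fp.seek(0)
    roundtripped = pd.read_feather(fp)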
コード例 #58
ファイル: try-fast.v2.py プロジェクト: zengxi-hada/HIVID2
import gzip
import os
from Bio import SeqIO
from tempfile import TemporaryFile
import argparse
import sys
import re
reload(sys) 
sys.setdefaultencoding('utf-8')
sys.setrecursionlimit(10000)

## author
#Zhou Yi

bin_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
temp = TemporaryFile()

cmd_parser = argparse.ArgumentParser(description='re-evaluate uniq_read in support_read')
cmd_parser.add_argument('-fq1', help='fastq1 after remove adapter')
cmd_parser.add_argument('-fq2', help='fastq2 after remove adapter')
cmd_parser.add_argument('-i', help='human-uniq infile')
cmd_parser.add_argument('-o', help='outfile, directory and name')
cmd_parser.add_argument('-erate',type=float,default=0.04,help='error rate of sequencing for allowing mismatch')
cmd_parser.add_argument('-ref',default="./ref.list",help='ref genome ID list')
cmd_parser.add_argument('-id',help='sample name')
cmd_args = cmd_parser.parse_args()

# complement the sequence; no reversal needed
def revseq(seq):
	seq = seq.replace("A", "t").replace("T", "a").replace("G", "c").replace("C", "g")
	seq = seq.upper()  # [::-1] would reverse, which is intentionally not done
	return seq
    q[1] = np.random.uniform(low=0.0, high=0.1)
    q[2] = np.random.uniform(low=0.0, high=0.1)
    q[3] = np.random.uniform(low=0.0, high=1.0)
    q[4] = np.random.uniform(low=0.0, high=1.0)
    return q

burnin = 1000000
T = 2000000

covid = pytwalk.pytwalk(n=5,U=energy,Supp=support)
y0=init()
yp0=init()
covid.Run(T,y0,yp0)

    
cadena = TemporaryFile()  # note: unused; the chain is saved to a named path below
np.save('covid/cadena', covid.Output)


chain = covid.Output

energy = chain[:,-1]
#############################################
### Computing the MAP estimate
energy_MAP = min(energy)
loc_MAP = np.where(energy==energy_MAP)[0]
MAP = chain[loc_MAP[-1]]
MAP = MAP[:-1]

### Computing the posterior mean
Post_mean = np.ones(5)
def partition(A, start, end):  # assumed name; the original def line is missing
    count = 0                  # comparison counter, inferred from its use below
    newPivotIndex = start - 1  # inferred initialisation
    for index in range(start, end):

        count += 1
        if A[index] < A[end]:  # check if current val is less than pivot value
            newPivotIndex = newPivotIndex + 1
            temp = A[newPivotIndex]
            A[newPivotIndex] = A[index]
            A[index] = temp

    temp = A[newPivotIndex + 1]
    A[newPivotIndex + 1] = A[end]
    A[end] = temp
    return newPivotIndex + 1, count


outfile = TemporaryFile()
p = 100  # number of elements to be sorted


mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)


outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _inPlaceQuickSort(M, 0, r)  # the recursive driver is not shown in this excerpt