Example #1
def save_db(request):
    # These are inside the function because they are quite large, and we want to
    # keep them in local scope, not global
    from StringIO import StringIO
    from settings import TABLE_DUMP_ORDER

    # Generate the dumpdata and save as strings in exports
    exports = []
    for tl in TABLE_DUMP_ORDER:
        buf = StringIO()
        management.call_command('dumpdata', *tl, verbosity=1, indent=2, format='json', stdout=buf)
        buf.seek(0)
        exports.append(buf.read())
    # exports now contains each of the JSON blobs to import, in THAT exact order.

    # from https://code.djangoproject.com/wiki/CookBookDynamicZip
    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=election.zip'
    # now add them to a zip file; note the zip exists only in memory as we add to it
    buffer = StringIO()
    electionzip = zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED)
    for index, election_data in enumerate(exports):
        electionzip.writestr("election_data_%03d.json" % (index,), election_data)
    electionzip.close()
    buffer.flush()
    # the important detail: we return the content of the buffer
    ret_zip = buffer.getvalue()
    buffer.close()
    response.write(ret_zip)
    return response
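A view like this needs a URLconf entry; a minimal sketch in the old-style Django this snippet targets (the URL pattern and module path are made up for illustration):

from django.conf.urls.defaults import patterns, url  # Django <= 1.5 style, matching the mimetype= usage above

urlpatterns = patterns('',
    url(r'^export/election\.zip$', 'myapp.views.save_db'),  # hypothetical module path
)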
Example #2
    def download_file(self, device_id, file_id, out=None):
        """Download an individual file from the iCloud Backup


        Arguments:
        device_id   -- Device ID to pull file from
        file_id     -- File ID representing the file we want to download

        Keyword Arguments:
        out         -- File like object to write response to. If not
                       provided we will write the object to memory.
        """
        if not out:
            out = StringIO()

        post_data = {
            'key': self.api.session_key,
            'device': device_id,
            'file': file_id,
        }

        response = requests.post(settings.get('endpoints', 'download_file'),
                                    auth=self.api.auth, data=post_data,
                                    stream=True, headers=self.api.headers)

        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                out.write(chunk)
        out.flush()

        return out
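A usage sketch, assuming `backup` is an instance of the class above: pass a real file object to stream the download to disk, or omit `out` to buffer it in memory.

# Hypothetical usage of download_file
with open('backup_file.bin', 'wb') as fh:
    backup.download_file('DEVICE_ID', 'FILE_ID', out=fh)  # streamed to disk

buf = backup.download_file('DEVICE_ID', 'FILE_ID')  # returns a StringIO
data = buf.getvalue()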
Example #3
def make_thumbnail(filepath, name=None):
    with open(filepath, 'rb') as f:
        flock(f, LOCK_EX)
        imthumb = PIL.Image.open(f)
        imthumb.thumbnail(THUMBNAIL_SIZE, PIL.Image.ANTIALIAS)
        tf = StringIO()
        imthumb.save(tf, 'JPEG', quality=THUMBNAIL_QUALITY)
        tf.flush()
        digest = hash_file(tf)
        thumbpath = path.join(settings.MEDIA_ROOT, digest)
        tf.seek(0, 0)
        with open(thumbpath, 'wb+') as f:
            for chunk in iter(lambda: tf.read(1024), ''):
                f.write(chunk)
            size = f.tell()

    if name is None:
        name = 'thumbnail.jpeg'
    else:
        name = path.splitext(path.basename(name))[0] + '-thumbnail.jpeg'

    thumbnail = Image(
            name    = name,
            sha1sum = digest,
            size    = size,
            mime    = 'image/jpeg',
    )
    thumbnail.save()
    return thumbnail
Example #4
 def deLexWithCharPositions(self):
     from StringIO import StringIO
     line = 1
     col = 0
     src = StringIO()
     charpositions = {}
     for j in range(0, len(self)):
         l = self[j]
         if line < l[2][0]:
             src.write(os.linesep * (l[2][0] - line))
             col = 0
             line += (l[2][0] - line)
         if col < l[2][1]:
             src.write(" " * (l[2][1] - col))
             col += (l[2][1] - col)
         # NOTE: charpositions is never populated (this loop is disabled),
         # so the method currently returns an empty mapping.
         #for i in range(len(src), len(src) + len(l[1])):
         #    charpositions[i] = j
         src.write(l[1])
         nls = l[1].count(os.linesep)
         if (nls > 0):
             line += nls
             col = len(l[1].splitlines().pop())
         else:
             col += len(l[1])
     src.flush()
     return (src.getvalue(), charpositions)
Example #5
class _FakeLOB(object):

    def __init__(self, data=None):
        self._io = StringIO()
        if data is not None:
            self.write(data)

    def close(self):
        pass

    def isopen(self):
        return not self._io.closed

    def open(self):
        pass

    def read(self, offset=1, amount=None):
        self._io.seek(offset - 1)
        return self._io.read(amount)

    def size(self):
        return len(self.read())

    def trim(self, newSize=0):
        return self._io.truncate(newSize)

    def getchunksize(self):
        return None

    def write(self, data, offset=1):
        self._io.seek(offset - 1)
        self._io.write(data)
        self._io.flush()
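_FakeLOB mimics a database LOB interface in which read and write offsets are 1-based; a quick round-trip sketch:

lob = _FakeLOB('hello world')
print lob.read(7)      # 'world' (offset 7 is the 7th character)
lob.write('WORLD', 7)  # overwrite starting at the same offset
print lob.size()       # 11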
Example #6
    def flush(self):
        """ Write the whole buffer to Amazon's server, overwriting any existing object. """
        StringIO.flush(self)

        if not self.dirty:
            return  #nothing has been written to the buffer, so there isn't anything to flush

        if self.len == 0:
            raise S3IOError("String length must be greater than zero.")

        if self.len > self.MAX_OBJECT_SIZE:
            raise S3IOError("String length must not exceed %s bytes." %
                            self.MAX_OBJECT_SIZE)

        #if self.sent_len == self.len:
        #    return

        obj = self.getvalue()

        logging.info('flushing %s.%s meta: %s' %
                     (self.bucket_name, self.object_name, self.meta))

        #write the full buffer
        response = self.conn.put(self.bucket_name, self.object_name,
                                 S3Object(str(obj)), self.meta)

        if response.http_response.status != 200:
            raise S3ResponseError, response

        logging.debug('flush successful')
        self.sent_len = self.len
        self.dirty = False
Example #7
def bio3graph_build_default_vocabulary_custom_compounds(input_dict):
    from triplet_extractor import tripletExtraction as te
    from os.path import normpath, join, dirname
    from StringIO import StringIO

    comp = input_dict['compounds']

    dname = normpath(dirname(__file__))
    voc = te.Vocabulary()
    s = StringIO()
    s.write(comp)
    s.flush()
    voc.loadCompounds_file(s)
    voc.loadPredicates_files(
        activationFname=join(dname,
                             'triplet_extractor/vocabulary/activation.lst'),
        activations_rotate=join(
            dname, 'triplet_extractor/vocabulary/activation_rotate.lst'),
        inhibitionFname=join(dname,
                             'triplet_extractor/vocabulary/inhibition.lst'),
        bindingFname=join(dname, 'triplet_extractor/vocabulary/binding.lst'),
        activationFname_passive=join(
            dname, 'triplet_extractor/vocabulary/activation_pas.lst'),
        inhibitionFname_passive=join(
            dname, 'triplet_extractor/vocabulary/inhibition_pas.lst'),
        bindingFname_passive=join(
            dname, 'triplet_extractor/vocabulary/binding_pas.lst'))
    return {'vocabulary': voc}
Example #8
class LogCapture(ContextDecorator):
    def __enter__(self):
        self.buf = StringIO()

        self.oldLogLevel = log.getEffectiveLevel()
        log.setLevel(logging.INFO)

        self.oldLogger = log.handlers[0]
        log.removeHandler(self.oldLogger)

        self.logHandler = logging.StreamHandler(self.buf)
        formatter = logging.Formatter("[%(levelname)s] %(message)s")
        self.logHandler.setFormatter(formatter)

        log.addHandler(self.logHandler)
        return self

    def __exit__(self, *args):
        # Restore logging level
        log.setLevel(self.oldLogLevel)
        log.removeHandler(self.logHandler)
        log.addHandler(self.oldLogger)

        return False

    def getOutput(self):
        self.logHandler.flush()
        self.buf.flush()

        output = re.sub(r'^\[\w+\]\s+', '', self.buf.getvalue(), flags=re.M)

        return output
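Because LogCapture subclasses ContextDecorator, it works both as a context manager and as a decorator; a minimal sketch, assuming the module-level `log` used above:

# Capture log output inside a with-block
with LogCapture() as capture:
    log.info('building index')
text = capture.getOutput()  # level prefixes are stripped by getOutput()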
Example #10
def android_fplan_bitmap_format(hmap):
    out = StringIO()
    print "Binary hmap download in progress"

    def writeFloat(f):
        out.write(pack(">f", f))

    def writeInt(i):
        assert i >= -(1 << 31) and i < (1 << 31)
        out.write(pack(">i", i))  # signed 32-bit, matching the assert's range

    def writeBuf(encoded):
        assert type(encoded) == str
        l = len(encoded)
        assert l < (1 << 31)
        out.write(pack(">I", l))  # 4-byte big-endian length prefix
        out.write(encoded)

    writeInt(len(hmap))  #zoomlevels
    for zoomlevel, tiles in hmap.items():
        writeInt(zoomlevel)
        nonemptytiles = [(merc, tile) for (merc, tile) in tiles.items()
                         if tile]
        writeInt(len(nonemptytiles))  #tiles in this zoomlevel
        for merc, tile in nonemptytiles:
            #print "Zoom: %d Merc: %s"%(zoomlevel,merc)
            writeInt(merc[0])
            writeInt(merc[1])
            #assert len(tile)==2*2*64*64
            #print "Tile len is:%d"%(len(tile,))
            writeBuf(tile)
    writeInt(0x1beef)  #Magic to verify writing
    out.flush()
    print "Binary hmap download complete"
    return out.getvalue()
Example #11
class _PythonCapturer(object):

    def __init__(self, stdout=True):
        if stdout:
            self._original = sys.stdout
            self._set_stream = self._set_stdout
        else:
            self._original = sys.stderr
            self._set_stream = self._set_stderr
        self._stream = StringIO()
        self._set_stream(self._stream)

    def _set_stdout(self, stream):
        sys.stdout = stream

    def _set_stderr(self, stream):
        sys.stderr = stream

    def release(self):
        # Original stream must be restored before closing the current
        self._set_stream(self._original)
        self._stream.flush()
        output = self._stream.getvalue()
        self._stream.close()
        return output
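A usage sketch: constructing the capturer swaps the stream immediately, so anything printed afterwards lands in the buffer until release() restores the original.

capturer = _PythonCapturer(stdout=True)
print 'hello from the capturer'
text = capturer.release()  # restores sys.stdout and returns the captured output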
Example #12
 def download(self, subtitles):
     login = Login_Itasa()
     for subtitle in subtitles:
         content_type = ''
         attempts = 0
         while 'application/zip' not in content_type:
             url = ITASA_SUBTITLE_DOWNLOAD.format(subtitle['id'], login.authcode, ITASA_KEY)
             file = HTTP.Request(url, cacheTime=0)
             content_type = file.headers['content-type']
             if 'text/xml' in content_type:
                 Log.Debug('[ {} ] Authcode not valid. Trying to retrieve it..'.format(PLUGIN_NAME))
                 login.do_authcode()
             if 'text/html' in content_type and 'utenti registrati' in file.content:
                 Log.Debug('[ {} ] Not logged. Trying to log in'.format(PLUGIN_NAME))
                 login.do_login()
             if 'text/html' in content_type and 'limite di download' in file.content:
                 Log.Debug('[ {} ] You have reached the download limit for this subtitle'.format(PLUGIN_NAME))
                 break
             if attempts > 5:
                 break
             attempts += 1
         filebuffer = StringIO()
         filebuffer.write(file.content)  # write the response body, not the request object
         filebuffer.flush()
         Log.Debug('[ {} ] Subtitle {} (id: {}) downloaded!'.format(PLUGIN_NAME, subtitle['name'], subtitle['id']))
         for sub_content in self.unzip(filebuffer):
             sub_hash = hashlib.md5(sub_content).hexdigest()
             subtitle['subs'].append((sub_hash, sub_content))
     return subtitles
Example #13
def pickle_stringio():
    '''
    It's easy to replace StringIO in this example with a file or socket;
    the class definition must exist on both the server (dump) side and the
    client (load) side.

    For objects that can't be pickled directly, special methods can be
    defined to help with serialization; see the standard library docs.
    '''
    l = []
    l.append(simple_object('test'))
    l.append(simple_object('pickle'))
    l.append(simple_object('cPickle'))

    out_s = StringIO()
    for i in l:
        print "Write %s:%s" % (i.name, i.name_reversed)
        pickle.dump(i, out_s)
        out_s.flush()

    pprint.pprint(out_s.getvalue())


    in_s = StringIO(out_s.getvalue())

    while(True):
        try:
            o = pickle.load(in_s)
        except EOFError:
            break
        else:
            print "Read %s:%s"% (o.name, o.name_reversed)
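`simple_object` isn't defined in the snippet; a minimal stand-in, inferred from the attributes used above, might look like this:

# Hypothetical definition of the pickled class
class simple_object(object):
    def __init__(self, name):
        self.name = name
        self.name_reversed = name[::-1]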
Example #14
 def flush(self):
     """
     Calls two flush.
     """
     StringIO.flush(self)
     if self.handle:
         self.handle.flush()
Example #15
 def getPILImageFromData(self, data):
     fo = StringIO()
     fo.write(data)
     fo.flush()
     fo.seek(0)
     img = Image.open(fo)
     return img
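StringIO can also be seeded directly with the data, which makes the write/flush/seek dance unnecessary; an equivalent sketch of the same method:

 def getPILImageFromData(self, data):
     # StringIO(data) starts at position 0, ready for Image.open
     return Image.open(StringIO(data))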
Example #16
    def pack(self, files, jad_properties=None):
        jad_properties = jad_properties or {}

        # pack files into jar
        buffer = StringIO(self.jar)
        zipper = ZipFile(buffer, 'a', ZIP_DEFLATED)
        for path in files:
            zipper.writestr(path, files[path])
        zipper.close()
        buffer.flush()
        jar = buffer.getvalue()
        buffer.close()

        # update and sign jad
        signed = False
        if self.jad:
            jad = JadDict.from_jad(self.jad)
            jad.update({
                'MIDlet-Jar-Size': len(jar),
            })
            jad.update(jad_properties)
            if hasattr(settings, 'JAR_SIGN'):
                jad = sign_jar(jad, jar)
                signed = True
        else:
            jad = None

        return JadJar(jad, jar, self.version, self.build_number, signed=signed)
Example #17
def release_download(request, project_slug, release_slug, skip=False):
    """
    Download all resources in given release in one handy ZIP file
    """
    project = get_object_or_404(Project, slug=project_slug)
    release = get_object_or_404(Release, slug=release_slug, project=project)

    resources = Resource.objects.filter(releases=release)
    zip_buffer = StringIO()
    zip_file = zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED)
    for stat in RLStats.objects.select_related(
            'language').by_release_aggregated(release):
        for resource in resources:
            template = _compile_translation_template(resource, stat.object,
                                                     skip)
            zip_file.writestr("%s/%s" % (stat.object.code, resource.name),
                              template)

    zip_file.close()
    zip_buffer.flush()
    zip_contents = zip_buffer.getvalue()

    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=%s_%s.zip' % (project_slug,
                                                              release_slug)
    response.write(zip_contents)
    return response
Example #18
    def pack(self, files, jad_properties=None):
        jad_properties = jad_properties or {}

        # pack files into jar
        buffer = StringIO(self.jar)
        with ZipFile(buffer, "a", ZIP_DEFLATED) as zipper:
            for path in files:
                zipper.writestr(path, files[path])
        buffer.flush()
        jar = buffer.getvalue()
        buffer.close()

        # update and sign jad
        signed = False
        if self.jad:
            jad = JadDict.from_jad(self.jad)
            jad.update({"MIDlet-Jar-Size": len(jar)})
            jad.update(jad_properties)
            if hasattr(settings, "JAR_SIGN"):
                jad = sign_jar(jad, jar)
                signed = True
            else:
                jad = jad.render()
        else:
            jad = None

        return JadJar(jad, jar, self.version, self.build_number, signed=signed)
Example #19
def release_language_download(request,
                              project_slug,
                              release_slug,
                              lang_code,
                              skip=False):
    """
    Download all resources in given release/language in one handy ZIP file
    """
    project = get_object_or_404(Project, slug=project_slug)
    release = get_object_or_404(Release, slug=release_slug, project=project)
    language = get_object_or_404(Language, code=lang_code)

    zip_buffer = StringIO()
    zip_file = zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED)
    for resource in Resource.objects.filter(releases=release):
        template = _compile_translation_template(resource, language, skip)
        zip_file.writestr(resource.name, template)

    zip_file.close()
    zip_buffer.flush()
    zip_contents = zip_buffer.getvalue()

    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=%s_%s_%s.zip' % \
        (project_slug, release_slug, lang_code)
    response.write(zip_contents)
    return response
Example #21
class Logger(object):

	"""
	filename can be None, in which case logs are buffered in memory until you're ready to write them to a file
	levels is a dictionary of `level: label`; the label gets prepended to every line of that level
	timestamped turns on timestamping in the format [hh:mm:ss]
	append can be flipped to True if you want to continue the log file instead of overwriting it
	"""
	def __init__(self, filename=None, levels=None, timestamped=True, append=False):
		if filename is None:
			self.log = StringIO()
			self.closed = False
		else:
			if append:
				mode = 'a'
			else:
				mode = 'w'
			self.log = open(filename, mode)
			self.closed = self.log.closed
		self.timestamped = timestamped
		self.levels = levels
	
	# if the class was initialised with a null filename, the logs are being written to a buffer. this function moves the buffer into a real file.
	def realize(self, filename):
		if not isinstance(self.log, StringIO):
			return
		buf = self.log.getvalue()
		self.log.close()
		self.log = open(filename, "w")
		self.closed = self.log.closed
		self.log.write(buf)
	
	def close(self):
		self.closed = True
		self.log.close()
		
	def write(self, msg, level=None):
		try:
			msg = msg.strip().encode('utf-8').split('\n')
		except UnicodeDecodeError:
			msg = msg.strip().split('\n')

		if level is not None and self.levels is not None and level not in self.levels:
			return
		
		if len(msg) > 1:
			for m in msg:
				self.write(m, level)
		elif len(msg[0].strip()) > 0:
			if self.timestamped:
				timestamp = strftime("[%H:%M:%S] ", localtime())
			else:
				timestamp = ''
			
			if self.levels is not False and isinstance(self.levels, dict) and level in self.levels.keys():
				self.log.write(timestamp + self.levels[level] + msg[0] + "\n")
			else:
				self.log.write(timestamp + msg[0] + "\n")
			
			self.log.flush()
Example #22
class ZipFile(object):
    def __init__(self,
                 jinja_env=None,
                 compress_type=zipfile.ZIP_DEFLATED,
                 external_attr=2175008768):
        self.jinja_env = jinja_env
        self.fileIO = StringIO()
        self.zip = zipfile.ZipFile(self.fileIO, 'w')
        self.default_compress_type = compress_type
        self.default_external_attr = external_attr

    def write_template(self, filename, template, d, **kwargs):
        if not self.jinja_env:
            raise ValueError('Jinja2 Environment is not set')
        self.write(filename,
                   self.jinja_env.get_template(template).render(d), **kwargs)

    def write(self,
              filename,
              content,
              external_attr=2175008768,
              compress_type=None):
        info = zipfile.ZipInfo(filename)
        info.compress_type = compress_type or self.default_compress_type
        info.external_attr = external_attr or self.default_external_attr
        self.zip.writestr(info, content)

    def get_zip(self):
        self.zip.close()
        self.fileIO.flush()
        return self.fileIO
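A usage sketch of the wrapper (the template name and context are illustrative):

import jinja2

zf = ZipFile(jinja_env=jinja2.Environment(
    loader=jinja2.FileSystemLoader('templates')))
zf.write('README.txt', 'built entirely in memory')
zf.write_template('config.xml', 'config.xml.j2', {'name': 'demo'})  # hypothetical template
payload = zf.get_zip().getvalue()  # bytes of the finished archive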
Example #23
    def fetch_and_extract_zipped_csv(self, url, expected_filename, dest):
        """Grab a zipfile from a url, and extract a CSV.

        Save it to a datestamped folder if it's different from the
        latest previously-known data

        """
        t = tempfile.mkdtemp()  # mkdtemp() returns the path directly (unlike mkstemp)
        f = StringIO()
        f.write(requests.get(url).content)
        f.flush()
        zipfile = ZipFile(f)
        zipfile.extract(expected_filename, t)
        extracted_file_path = "%s/%s" % (t, expected_filename)
        most_recent = self.most_recent_file(dest)
        changed = not filecmp.cmp(
            most_recent, extracted_file_path, shallow=True)
        if changed:
            new_folder = datetime.datetime.today().strftime("%Y_%m")
            new_path = "%s/%s/" % (dest, new_folder)
            os.makedirs(new_path)
            if self.args.verbose:
                print "%s has changed; creating new copy" % most_recent
            shutil.copy(extracted_file_path, new_path)
        shutil.rmtree(t)
Example #25
 def _parser_to_string_io(parser):
     """Turns a ConfigParser into a StringIO stream."""
     memory_file = StringIO()
     parser.write(memory_file)
     memory_file.flush()
     memory_file.seek(0)
     return memory_file
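Round-trip sketch, treating the helper as a free function for illustration: serialize a ConfigParser to memory and read it straight back (standard library only).

from ConfigParser import ConfigParser

parser = ConfigParser()
parser.add_section('db')
parser.set('db', 'host', 'localhost')

clone = ConfigParser()
clone.readfp(_parser_to_string_io(parser))  # reads from the rewound StringIO
print clone.get('db', 'host')  # localhost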
Example #26
    def test_log_call_7(self):
        expected_result = """DEBUG:""" + self.logger.name + """:Entering callable: "test".\n""" + \
        """DEBUG:""" + self.logger.name + """:Arguments: "('c',)\"\n""" + \
        """Keyword Arguments: "{}".\n""" + \
        """ERROR:""" + self.logger.name + """:"""

        @nanshe.util.prof.log_call(self.logger, to_print_args=True, to_print_exception=True)
        def test(a, b=5):
            return(a + b)

        expected_traceback = StringIO()

        try:
            test("c")
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()

            print("Traceback (most recent call last):", file=expected_traceback)
            traceback.print_tb(exc_traceback.tb_next.tb_next, file=expected_traceback)
            print(exc_type.__name__ + ":", exc_value, file=expected_traceback)

            print(file=expected_traceback)
            expected_traceback.flush()

        expected_result += expected_traceback.getvalue()
        expected_traceback.close()

        self.handler.flush()
        result = self.stream.getvalue()

        print(repr(expected_result))
        print(repr(result))

        assert (result == expected_result)
Example #27
def get_all_translations_zip(request, project_slug, mode=None, skip=None):
    """
    Download all resources/languages in given project in one big ZIP file
    """
    project = get_object_or_404(Project, slug=project_slug)
    resources = Resource.objects.filter(project=project)
    zip_buffer = StringIO()
    zip_file = zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED)
    for stat in RLStats.objects.for_user(request.user).by_project_language_aggregated(project):
        for resource in resources:
            template = _compile_translation_template(resource, stat.object, mode, skip)
            zip_file.writestr("%s/%s" % (stat.object.code, resource.name), template)

    zip_file.close()
    zip_buffer.flush()
    zip_contents = zip_buffer.getvalue()

    if mode == Mode.TRANSLATED:
        subname = "empty"
    elif skip:
        subname = "skipped"
    else:
        subname = "replaced"
    filename = project_slug + "_" + subname
    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=%s.zip' % filename
    response.write(zip_contents)
    return response
Example #28
def pack_stream(obj):
    stream_out = StringIO()
    pickle.dump(obj, stream_out, protocol=PROTOCOL)  # dump the argument, not an undefined name
    stream_out.flush()
    stream_in = StringIO(stream_out.getvalue())
    stream = pickle.loads(stream_in.getvalue())
    return stream
Example #29
def full_context_error_logger():
    """ Capture all logs emitted within a context (a generator meant for
    contextlib.contextmanager).
    :return:
    """
    from logging.handlers import MemoryHandler
    from StringIO import StringIO
    buffer = StringIO()
    logLevel = logging.DEBUG
    streamhandler = logging.StreamHandler(buffer)
    streamhandler.setLevel(logLevel)
    streamhandler.setFormatter(formatter)
    memory_handler = MemoryHandler(capacity=1024 * 100,
                                   flushLevel=logging.ERROR,
                                   target=streamhandler)
    memory_handler.setLevel(logLevel)
    memory_handler.setFormatter(formatter)
    rootLogger = logging.getLogger()
    rootLogger.addHandler(memory_handler)
    result = {"error_log": None}
    try:
        yield result
    except:
        memory_handler.flush()
        buffer.flush()
        result["error_log"] = buffer.getvalue() + traceback.format_exc()
    finally:
        rootLogger.removeHandler(memory_handler)
        memory_handler.close()
        buffer.close()
Example #30
class MemoryLogs(object):
  """Collects logs in memory."""

  def __init__(self, logger):
    self._logger = logger
    self._log_buffer = StringIO()
    self._log_handler = logging.StreamHandler(self._log_buffer)
    formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s",
                                  "%y-%m-%d %H:%M:%S")
    self._log_handler.setFormatter(formatter)

  def Start(self):
    """Starts collecting the logs."""
    self._logger.addHandler(self._log_handler)

  def Flush(self):
    """Stops collecting the logs and returns the logs collected since Start()
    was called.
    """
    self._logger.removeHandler(self._log_handler)
    self._log_handler.flush()
    self._log_buffer.flush()
    result = self._log_buffer.getvalue()
    self._log_buffer.truncate(0)
    return result
Example #31
    def parse_pdf_log(self, logfile):
        """
        Strip down tex output to only the warnings, errors etc. and discard all the noise
        :param logfile:
        :return: string
        """
        import logging
        from StringIO import StringIO

        log_buffer = StringIO()
        log_handler = logging.StreamHandler(log_buffer)

        typesetter = Typesetter(self.tmp('tex'))
        typesetter.halt_on_errors = False

        handlers = typesetter.logger.handlers
        for handler in handlers:
            typesetter.logger.removeHandler(handler)

        typesetter.logger.addHandler(log_handler)
        typesetter.process_log(logfile)

        typesetter.logger.removeHandler(log_handler)

        log_handler.flush()
        log_buffer.flush()

        return log_buffer.getvalue()
Example #33
def check_simple_write_read(records, indent=" "):
    #print indent+"Checking we can write and then read back these records"
    for format in test_write_read_alignment_formats :
        print indent+"Checking can write/read as '%s' format" % format
        
        #Going to write to a handle...
        handle = StringIO()
        
        try :
            c = SeqIO.write(sequences=records, handle=handle, format=format)
            assert c == len(records)
        except ValueError, e :
            #This is often expected to happen, for example when we try and
            #write sequences of different lengths to an alignment file.
            print indent+"Failed: %s" % str(e)
            assert format != t_format, \
                   "Should be able to re-write in the original format!"
            #Carry on to the next format:
            continue

        handle.flush()
        handle.seek(0)
        #Now ready to read back from the handle...
        try :
            records2 = list(SeqIO.parse(handle=handle, format=format))
        except ValueError, e :
            #This is BAD.  We can't read our own output.
            #I want to see the output when called from the test harness,
            #run_tests.py (which can be funny about new lines on Windows)
            handle.seek(0)
            raise ValueError("%s\n\n%s\n\n%s" \
                              % (str(e), repr(handle.read()), repr(records)))
Example #34
 def render(self, item, convertedData, **kwargs):
     relPath = item.relPath
     path, name, _ = self.__fs.splitPathFileExt(relPath)
     fileData = item.read()
     
     appletHtml = CmlUtil.getAppletHtml(relPath, 300, 300,
                                        archive=self.__fs.join(path, "JmolApplet.jar"))
     pngHtml = CmlUtil.getPngHtml(self.__fs.join(path, name + ".png"))
     if CmlUtil.isRenderable(fileData):
         if CmlUtil.is3D(fileData):
             bodyHtml = appletHtml
         else:
             bodyHtml = pngHtml
         convertedData.addRenditionData(".xhtml.body", bodyHtml)
         convertedData.addRenditionData(".xhtml.embed", bodyHtml)
         
         util = CmlUtil(self.iceContext, self.iceContext.settings.get('convertUrl'))
         svgData = util.createPreviewImage(fileData, 'svg')
         convertedData.addRenditionData(".svg", svgData)
         
         pngData = util.createPreviewImage(fileData, 'png', '300', '300')
         convertedData.addRenditionData(".png", pngData)
         
         # convert the png to pdf
         if pngData != "":
             image = Image.open(StringIO(pngData))
             pdfFile = StringIO()
             image.save(pdfFile, "PDF")
             pdfFile.flush()
             pdfFile.seek(0)
             convertedData.addRenditionData(".pdf", pdfFile.read())
     else:
         print "Warning: '%s' is not renderable!" % relPath
     
     return convertedData
Example #35
    def pack(self, files, jad_properties=None):
        jad_properties = jad_properties or {}

        # pack files into jar
        buffer = StringIO(self.jar)
        with ZipFile(buffer, 'a', ZIP_DEFLATED) as zipper:
            for path, f in files.items():
                zipper.writestr(path, convert_XML_To_J2ME(f, path, self.use_j2me_endpoint))
        buffer.flush()
        jar = buffer.getvalue()
        buffer.close()

        # update and sign jad
        signed = False
        if self.jad:
            jad = JadDict.from_jad(self.jad, use_j2me_endpoint=self.use_j2me_endpoint)
            jad.update({
                'MIDlet-Jar-Size': len(jar),
            })
            jad.update(jad_properties)
            if hasattr(settings, 'JAR_SIGN'):
                jad = sign_jar(jad, jar, use_j2me_endpoint=self.use_j2me_endpoint)
                signed = True
            else:
                jad = jad.render()
        else:
            jad = None

        return JadJar(jad, jar, self.version, self.build_number,
                      signed=signed, use_j2me_endpoint=self.use_j2me_endpoint)
Example #37
def get_translation_zip(request, project_slug, lang_code, mode=None):
    """
    Download all resources in given language in one ZIP file
    """
    project = get_object_or_404(Project, slug=project_slug)
    language = get_object_or_404(Language, code=lang_code)

    zip_buffer = StringIO()
    zip_file = zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED)
    for resource in Resource.objects.filter(project=project):
        template = _compile_translation_template(resource, language, mode)
        zip_file.writestr(resource.name, template)

    zip_file.close()
    zip_buffer.flush()
    zip_contents = zip_buffer.getvalue()

    if mode == Mode.TRANSLATED:
        subname = "empty"
    else:
        subname = "replaced"
    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=%s_%s_%s.zip' % \
        (project_slug, lang_code, subname)
    response.write(zip_contents)
    return response
Example #38
def main_script(page, rev=None, params=None):
    # http://opensourcehacker.com/2011/02/23/temporarily-capturing-python-logging-output-to-a-string-buffer/
    # http://docs.python.org/release/2.6/library/logging.html
    from StringIO import StringIO
    import logging

    # safety; default mode is safe (no writing)
    pywikibot.config.simulate = True

    pywikibot.output(u'--- ' * 20)

    buffer = StringIO()
    rootLogger = logging.getLogger()

    logHandler = logging.StreamHandler(buffer)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logHandler.setFormatter(formatter)
    rootLogger.addHandler(logHandler)

    sys.stdout = buffer
    sys.stderr = buffer

    # all output to logging and stdout/stderr is caught, BUT NOT lua output (!)
    if rev is None:
        code = page.get()  # shell; "on demand"
    else:
        code = page.getOldVersion(rev)  # crontab; scheduled
    try:
        exec(code)
    except:
        # (done according to subster in trunk and submit in rewrite/.../data/api.py)
        pywikibot.exception(
            tb=True)  # secure traceback print (from api.py submit)

    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__

    # Remove our handler
    rootLogger.removeHandler(logHandler)

    logHandler.flush()
    buffer.flush()

    pywikibot.output(u'--- ' * 20)

    # safety; restore settings
    pywikibot.config.simulate = __simulate
    sys.argv = __sys_argv

    pywikibot.output(
        u'environment: garbage; %s / memory; %s / members; %s' %
        (gc.collect(), resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
         resource.getpagesize(), len(dir())))
    # 'len(dir())' is equivalent to 'len(inspect.getmembers(__main__))'

    # append result to output page
    if rev is None:
        wiki_logger(buffer.getvalue(), page, rev)
Example #39
def check_simple_write_read(alignments, indent=" "):
    # print indent+"Checking we can write and then read back these alignments"
    for format in test_write_read_align_with_seq_count:
        records_per_alignment = len(alignments[0])
        for a in alignments:
            if records_per_alignment != len(a):
                records_per_alignment = None
        # Can we expect this format to work?
        if not records_per_alignment and format not in test_write_read_alignment_formats:
            continue

        print(indent + "Checking can write/read as '%s' format" % format)

        # Going to write to a handle...
        handle = StringIO()

        try:
            c = AlignIO.write(alignments, handle=handle, format=format)
            assert c == len(alignments)
        except ValueError as e:
            # This is often expected to happen, for example when we try and
            # write sequences of different lengths to an alignment file.
            print(indent + "Failed: %s" % str(e))
            # Carry on to the next format:
            continue

        # First, try with the seq_count
        if records_per_alignment:
            handle.flush()
            handle.seek(0)
            try:
                alignments2 = list(AlignIO.parse(handle=handle, format=format, seq_count=records_per_alignment))
            except ValueError as e:
                # This is BAD.  We can't read our own output.
                # I want to see the output when called from the test harness,
                # run_tests.py (which can be funny about new lines on Windows)
                handle.seek(0)
                raise ValueError("%s\n\n%s\n\n%s" % (str(e), repr(handle.read()), repr(alignments)))
            simple_alignment_comparison(alignments, alignments2, format)

        if format in test_write_read_alignment_formats:
            # Don't need the seq_count
            handle.flush()
            handle.seek(0)
            try:
                alignments2 = list(AlignIO.parse(handle=handle, format=format))
            except ValueError as e:
                # This is BAD.  We can't read our own output.
                # I want to see the output when called from the test harness,
                # run_tests.py (which can be funny about new lines on Windows)
                handle.seek(0)
                raise ValueError("%s\n\n%s\n\n%s" % (str(e), repr(handle.read()), repr(alignments)))
            simple_alignment_comparison(alignments, alignments2, format)

        if len(alignments) > 1:
            # Try writing just one Alignment (not a list)
            handle = StringIO()
            SeqIO.write(alignments[0], handle, format)
            assert handle.getvalue() == alignments[0].format(format)
Example #40
class CaptureStdOut(object):
    """
    An logger that both prints to stdout and writes to file.
    """
    def __init__(self, log_file_path=None, print_to_console=True):
        """
        :param log_file_path: The path to save the records, or None if you just want to keep it in memory
        :param print_to_console:
        """
        self._print_to_console = print_to_console
        if log_file_path is not None:
            # self._log_file_path = os.path.join(base_dir, log_file_path.replace('%T', now))
            make_file_dir(log_file_path)
            self.log = open(log_file_path, 'w')
        else:
            self.log = StringIO()
        self._log_file_path = log_file_path
        self.old_stdout = _ORIGINAL_STDOUT

    def __enter__(self):

        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr

        sys.stdout = self
        sys.stderr = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        self.close()

    def get_log_file_path(self):
        assert self._log_file_path is not None, "You never specified a path when you created this logger, so don't come back and ask for one now"
        return self._log_file_path

    def write(self, message):
        if self._print_to_console:
            self.old_stdout.write(message)
        self.log.write(message)
        self.log.flush()

    def close(self):
        if self._log_file_path is not None:
            self.log.close()

    def read(self):
        if self._log_file_path is None:
            return self.log.getvalue()
        else:
            with open(self._log_file_path) as f:
                txt = f.read()
            return txt

    def __getattr__(self, item):
        return getattr(self.old_stdout, item)
Example #41
def export_csv_iter(*args, **kwargs):
    s = StringIO()
    w = csv.writer(s)
    for row in export_iter(*args, **kwargs):
        w.writerow(row)
        s.flush()
        yield s.getvalue()
        s.truncate(0)
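The truncate(0) resets both the buffer and, in Python 2's StringIO, the write position, so each yielded chunk holds exactly one CSV row. A streaming-response sketch in the Django style used elsewhere on this page (the view and arguments are illustrative):

def export_view(request):
    # Hypothetical view streaming the generator above
    response = HttpResponse(export_csv_iter(), mimetype='text/csv')
    response['Content-Disposition'] = 'filename=export.csv'
    return response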
Example #42
 def test_write_empty_seq(self):
     'It does not write an empty sequence'
     seq1 = SeqRecord(Seq('ACTG'), id='seq1')
     fhand = StringIO()
     write_seqrecords([seq1, None, SeqRecord(Seq(''), id='seq2')], fhand,
                      file_format='fasta')
     fhand.flush()
     assert fhand.getvalue() == '>seq1\nACTG\n'
Example #44
class Collector(object):
    """
  Collector for map and reduce output values
  """
    def __init__(self, scheme=None, outputClient=None):
        """

    Parameters
    ---------------------------------------------
    scheme - The scheme for the datums to output - can be a json string
           - or an instance of Schema
    outputClient - The output client used to send messages to the parent
    """

        if not (isinstance(scheme, schema.Schema)):
            scheme = schema.parse(scheme)

        if (outputClient is None):
            raise ValueError("output client can't be none.")

        self.scheme = scheme
        self.buff = StringIO()
        self.encoder = avio.BinaryEncoder(self.buff)

        self.datum_writer = avio.DatumWriter(writers_schema=self.scheme)
        self.outputClient = outputClient

    def collect(self, record, partition=None):
        """Collect a map or reduce output value

    Parameters
    ------------------------------------------------------
    record - The record to write
    partition - Indicates the partition for a pre-partitioned map output
              - currently not supported
    """

        self.buff.truncate(0)
        self.datum_writer.write(record, self.encoder)
        self.buff.flush()
        self.buff.seek(0)

        # delete all the data in the buffer
        if (partition is None):

            # TODO: Is there a more efficient way to read the data in self.buff?
            # we could use self.buff.read() but that returns the byte array as a string
            # will that work?  We can also use self.buff.readinto to read it into
            # a bytearray but the byte array must be pre-allocated
            # self.outputClient.output(self.buff.buffer.read())

            #its not a StringIO
            self.outputClient.request("output", {"datum": self.buff.read()})
        else:
            self.outputClient.request("outputPartitioned", {
                "datum": self.buff.read(),
                "partition": partition
            })
Example #45
    def from_directories(cls, directories, pattern=None, ignore=(), write=None, relative_to=None):
        """
        convert directories to a simple manifest; returns ManifestParser instance

        pattern -- shell pattern (glob) or patterns of filenames to match
        ignore -- directory names to ignore
        write -- filename or file-like object of manifests to write;
                 if `None` then a StringIO instance will be created
        relative_to -- write paths relative to this path;
                       if false then the paths are absolute
        """


        # determine output
        opened_manifest_file = None # name of opened manifest file
        absolute = not relative_to # whether to output absolute path names as names
        if isinstance(write, string):
            opened_manifest_file = write
            write = file(write, 'w')
        if write is None:
            write = StringIO()

        # walk the directories, generating manifests
        def callback(directory, dirpath, dirnames, filenames):

            # absolute paths
            filenames = [os.path.join(dirpath, filename)
                         for filename in filenames]
            # ensure new manifest isn't added
            filenames = [filename for filename in filenames
                         if filename != opened_manifest_file]
            # normalize paths
            if not absolute and relative_to:
                filenames = [relpath(filename, relative_to)
                             for filename in filenames]

            # write to manifest
            print >> write, '\n'.join(['[%s]' % denormalize_path(filename)
                                               for filename in filenames])


        cls._walk_directories(directories, callback, pattern=pattern, ignore=ignore)

        if opened_manifest_file:
            # close file
            write.close()
            manifests = [opened_manifest_file]
        else:
            # manifests/write is a file-like object;
            # rewind buffer
            write.flush()
            write.seek(0)
            manifests = [write]


        # make a ManifestParser instance
        return cls(manifests=manifests)
Example #48
def test_timesteps_per_at_run():
    """
    Check that each autotuning run (i.e. with a given block shape) takes
    ``autotuning.options['at_squeezer'] - data.time_order`` timesteps
    in an operator performing an increment such as
    ``a[t + timeorder, ...] = f(a[t, ...], ...)``.
    """

    buffer = StringIO()
    temporary_handler = logging.StreamHandler(buffer)
    logger.addHandler(temporary_handler)
    set_log_level('DEBUG')

    shape = (30, 30, 30)
    grid = Grid(shape=shape)
    x, y, z = grid.dimensions
    t = grid.stepping_dim

    # Function
    infield = Function(name='infield', grid=grid)
    infield.data[:] = np.arange(reduce(mul, shape),
                                dtype=np.int32).reshape(shape)
    outfield = Function(name='outfield', grid=grid)
    stencil = Eq(outfield.indexify(),
                 outfield.indexify() + infield.indexify() * 3.0)
    op = Operator(stencil, dle=('blocking', {'blockalways': True}))
    op(infield=infield, outfield=outfield, autotune=True)
    out = [i for i in buffer.getvalue().split('\n') if 'AutoTuner:' in i]
    assert len(out) == 4
    assert all('in 1 time steps' in i for i in out)
    buffer.truncate(0)

    # TimeFunction with increasing time order
    for to in [1, 2, 4]:
        infield = TimeFunction(name='infield', grid=grid, time_order=to)
        infield.data[:] = np.arange(reduce(mul, infield.shape),
                                    dtype=np.int32).reshape(infield.shape)
        outfield = TimeFunction(name='outfield', grid=grid, time_order=to)
        stencil = Eq(outfield.indexed[t + to, x, y, z],
                     outfield.indexify() + infield.indexify() * 3.0)
        op = Operator(stencil, dle=('blocking', {'blockalways': True}))
        op(infield=infield, outfield=outfield, autotune=True)
        out = [i for i in buffer.getvalue().split('\n') if 'AutoTuner:' in i]
        expected = options['at_squeezer'] - to
        assert len(out) == 4
        assert all('in %d time steps' % expected in i for i in out)
        buffer.truncate(0)

    logger.removeHandler(temporary_handler)

    temporary_handler.flush()
    temporary_handler.close()
    buffer.flush()
    buffer.close()
    set_log_level('INFO')
Example #49
def test_timesteps_per_at_run():
    """
    Check that each autotuning run (ie with a given block shape) takes
    ``autotuning.core.options['at_squeezer']`` timesteps, for an operator
    performing the increment ``a[t + timeorder, ...] = f(a[t, ...], ...)``.
    """
    from devito.core.autotuning import options

    buffer = StringIO()
    temporary_handler = logging.StreamHandler(buffer)
    logger.addHandler(temporary_handler)

    shape = (30, 30, 30)
    grid = Grid(shape=shape)
    x, y, z = grid.dimensions
    t = grid.stepping_dim

    # Function
    infield = Function(name='infield', grid=grid)
    infield.data[:] = np.arange(reduce(mul, shape),
                                dtype=np.int32).reshape(shape)
    outfield = Function(name='outfield', grid=grid)
    stencil = Eq(outfield.indexify(),
                 outfield.indexify() + infield.indexify() * 3.0)
    op = Operator(stencil, dle=('blocking', {'blockalways': True}))
    op(infield=infield, outfield=outfield, autotune=True)
    out = [i for i in buffer.getvalue().split('\n') if 'AutoTuner:' in i]
    assert len(out) == 4
    assert all('in 1 time steps' in i for i in out)
    buffer.truncate(0)

    # TimeFunction with increasing time order; increasing the time order
    # shouldn't affect how many iterations the autotuner runs
    for to in [1, 2, 4]:
        infield = TimeFunction(name='infield', grid=grid, time_order=to)
        infield.data[:] = np.arange(reduce(mul, infield.shape),
                                    dtype=np.int32).reshape(infield.shape)
        outfield = TimeFunction(name='outfield', grid=grid, time_order=to)
        stencil = Eq(outfield.indexed[t + to, x, y, z],
                     outfield.indexify() + infield.indexify() * 3.0)
        op = Operator(stencil, dle=('blocking', {'blockalways': True}))
        op(infield=infield, outfield=outfield, t=2, autotune=True)
        out = [i for i in buffer.getvalue().split('\n') if 'AutoTuner:' in i]
        assert len(out) == 4
        assert all('in %d time steps' % options['at_squeezer'] in i
                   for i in out)
        buffer.truncate(0)

    logger.removeHandler(temporary_handler)

    temporary_handler.flush()
    temporary_handler.close()
    buffer.flush()
    buffer.close()
Example #50
def public_key_list(request):
    public_keys = find_public_keys(request)
    ks = public_keys.keys()
    ks.sort()
    out = StringIO()
    for i in range(len(ks)):
        out.write("%s=%s\n"%(i, ks[i]))
    out.flush()
    toRet = out.getvalue()
    out.close()
    return toRet
Example #51
class OutputCapture(object):
    def __init__(self):
        self.saved_outputs = dict()
        self._log_level = logging.INFO

    def set_log_level(self, log_level):
        self._log_level = log_level
        if hasattr(self, '_logs_handler'):
            self._logs_handler.setLevel(self._log_level)

    def _capture_output_with_name(self, output_name):
        self.saved_outputs[output_name] = getattr(sys, output_name)
        captured_output = StringIO()
        setattr(sys, output_name, captured_output)
        return captured_output

    def _restore_output_with_name(self, output_name):
        captured_output = getattr(sys, output_name).getvalue()
        setattr(sys, output_name, self.saved_outputs[output_name])
        del self.saved_outputs[output_name]
        return captured_output

    def capture_output(self):
        self._logs = StringIO()
        self._logs_handler = logging.StreamHandler(self._logs)
        self._logs_handler.setLevel(self._log_level)
        logging.getLogger().addHandler(self._logs_handler)
        return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))

    def restore_output(self):
        logging.getLogger().removeHandler(self._logs_handler)
        self._logs_handler.flush()
        self._logs.flush()
        logs_string = self._logs.getvalue()
        delattr(self, '_logs_handler')
        delattr(self, '_logs')
        return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"), logs_string)

    def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None):
        self.capture_output()
        try:
            if expected_exception:
                return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs)
            else:
                return_value = function(*args, **kwargs)
        finally:
            (stdout_string, stderr_string, logs_string) = self.restore_output()

        testcase.assertEqual(stdout_string, expected_stdout)
        testcase.assertEqual(stderr_string, expected_stderr)
        if expected_logs is not None:
            testcase.assertEqual(logs_string, expected_logs)
        # This is a little strange, but I don't know where else to return this information.
        return return_value
Example #53
def bio3graph_construct_compounds_from_gene_synonyms(input_dict):
    import csv
    from StringIO import StringIO

    syns = input_dict['gene_synonyms']
    s = StringIO()
    w = csv.writer(s)
    for g in syns:
        elts = [g] + syns[g]
        w.writerow(elts)
    s.flush()
    result = s.getvalue()
    return {'compounds_csv': result}
Example #54
def c14n(xml):
    '''
    Applies c14n to the xml input
    @param xml: str
    @return: str
    '''
    tree = etree.parse(StringIO(xml))
    output = StringIO()
    tree.write_c14n(output, exclusive=False, with_comments=True, compression=0)
    output.flush()
    c14nized = output.getvalue().decode('utf-8')
    output.close()
    return c14nized
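A quick sketch exercising c14n (this assumes lxml's etree, which provides write_c14n):

xml = '<root><b b="1" a="2"/><!-- kept --></root>'
print c14n(xml)  # canonical form; attributes ordered, comments preserved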
Example #55
def check_simple_write_read(records, indent=" "):
    #print indent+"Checking we can write and then read back these records"
    for format in test_write_read_alignment_formats:
        if format not in possible_unknown_seq_formats \
        and isinstance(records[0].seq, UnknownSeq) \
        and len(records[0].seq) > 100:
           #Skipping for speed.  Some of the unknown sequences are
           #rather long, and it seems a bit pointless to record them.
           continue
        print indent+"Checking can write/read as '%s' format" % format
        
        #Going to write to a handle...
        handle = StringIO()
        
        try:
            c = SeqIO.write(sequences=records, handle=handle, format=format)
            assert c == len(records)
        except (TypeError, ValueError), e:
            #This is often expected to happen, for example when we try and
            #write sequences of different lengths to an alignment file.
            if "len()" in str(e):
                #Python 2.4.3,
                #>>> len(None)
                #...
                #TypeError: len() of unsized object
                #
                #Python 2.5.2,
                #>>> len(None)
                #...
                #TypeError: object of type 'NoneType' has no len()
                print "Failed: Probably len() of None"
            else:
                print indent+"Failed: %s" % str(e)
            assert format != t_format, \
                   "Should be able to re-write in the original format!"
            #Carry on to the next format:
            continue

        handle.flush()
        handle.seek(0)
        #Now ready to read back from the handle...
        try:
            records2 = list(SeqIO.parse(handle=handle, format=format))
        except ValueError, e:
            #This is BAD.  We can't read our own output.
            #I want to see the output when called from the test harness,
            #run_tests.py (which can be funny about new lines on Windows)
            handle.seek(0)
            raise ValueError("%s\n\n%s\n\n%s" \
                              % (str(e), repr(handle.read()), repr(records)))