def read(self, len=None):
    """Read up to `len` bytes from this reassembled stream.

    The baseclass cache is consulted first; on a miss the stream is
    rebuilt and the cache consulted once more.  A second miss is
    legitimate (zero-length streams never get a cache file) and
    yields ''.  `len` shadows the builtin but is kept for interface
    compatibility.
    """
    ## First attempt: the baseclass may already hold cached data.
    try:
        return File.read(self, len)
    except IOError:
        ## Cache miss - reassemble the stream and refresh the cache.
        self.create_new_stream(self.inode_ids)
        self.look_for_cached()

    ## For the reassembler it is sometimes legitimate to have no
    ## cached file - the file length is 0 bytes and the reassembler
    ## does not bother to create it.
    try:
        return File.read(self, len)
    except IOError:
        return ''
def read(self, length=None):
    """Return the decoded payload of the attachment this inode names.

    Consults the baseclass cache first.  On a miss, parses the
    underlying message and walks to the n'th non-multipart part,
    where n comes from the trailing inode component (e.g. '|a3').
    The whole payload is handed out on the first call; later calls
    (readptr > 0) and unresolvable attachment numbers return ''.
    """
    try:
        return File.read(self, length)
    except IOError:
        pass

    ## Everything is returned in a single read:
    if self.readptr > 0:
        return ''

    self.fd.seek(0)
    a = email.message_from_file(self.fd)

    ## The attachment number is encoded after a one character prefix
    ## in the last inode component:
    my_part = self.inode.split('|')[-1]
    attachment_number = int(my_part[1:])

    count = 0
    for part in a.walk():
        ## multipart containers do not count as attachments:
        if part.get_content_maintype() == 'multipart':
            continue

        if count == attachment_number:
            ## get_payload(decode=1) returns None when the part
            ## cannot be decoded - normalise to '' so the len()
            ## below and our callers both behave:
            self.message = part.get_payload(decode=1) or ''
            self.readptr += len(self.message)
            return self.message

        count += 1

    return ''
def read(self, length=None):
    """Hand back the decoded body of the requested mail attachment."""
    try:
        return File.read(self, length)
    except IOError:
        pass

    ## The payload is consumed in one read; anything after that is ''.
    if self.readptr > 0:
        return ''

    self.fd.seek(0)
    msg = email.message_from_file(self.fd)

    ## Trailing inode component encodes the attachment index after a
    ## one character prefix:
    wanted = int(self.inode.split('|')[-1][1:])

    ## Only leaf parts count - multipart containers are skipped.
    leaves = (p for p in msg.walk()
              if p.get_content_maintype() != 'multipart')
    for index, part in enumerate(leaves):
        if index == wanted:
            self.message = part.get_payload(decode=1)
            self.readptr += len(self.message)
            return self.message

    return ''
def read(self, length=None):
    ## Read decompressed data from a raw deflate stream, trying the
    ## baseclass cache first (Python 2 code):
    try:
        return File.read(self,length)
    except IOError:
        pass
    ## Lazily build the decompressor; -15 window bits selects a raw
    ## deflate stream with no gzip/zlib header:
    if not self.gz:
        self.fd.seek(0)
        self.gz = gzip.zlib.decompressobj(-15)
    count = 0
    step = 1024
    result = ''
    ## Copy ourself into the file - This is in case we have errors
    ## in the file, we try to read as much as possible:
    while 1:
        try:
            data=self.gz.decompress(self.fd.read(step))
        except IOError,e:
            ## On a read error, halve the chunk size and retry; give
            ## up once the step drops below 10 bytes:
            step /= 2
            if step<10:
                pyflaglog.log(pyflaglog.DEBUG, "Error reading from %s, could only get %s bytes" % (self.fd.inode, count));
                break
            else:
                continue
        except Exception, e:
            pyflaglog.log(pyflaglog.WARNING, "Unable to decompress inode %s" % e)
            break
    ## NOTE(review): the success path never accumulates `data` into
    ## `result`, advances `count`, or returns anything - the rest of
    ## this function appears truncated in this view; confirm against
    ## the full source before relying on this block.
def read(self, len=None):
    ## Read up to `len` bytes (None => everything) from this
    ## reassembled stream.  `len` shadows the builtin - kept for
    ## interface compatibility with the other drivers.
    ## Call our baseclass to see if we have cached data:
    try:
        return File.read(self, len)
    except IOError:
        pass
    ## Cache miss: rebuild the stream, then look for the cache again.
    self.create_new_stream(self.inode_ids)
    self.look_for_cached()
    ## For the reassembler its sometimes legitimate to have no
    ## cached file - this is because the file length is 0 bytes,
    ## and the reassembler does not bother to create it
    try:
        return File.read(self, len)
    except IOError:
        return ''
def read(self, length=None):
    ## Read decompressed data from a gzip member, trying the
    ## baseclass cache first (Python 2 code):
    try:
        return File.read(self,length)
    except IOError:
        pass
    ## Lazily wrap the backing fd in a GzipFile reader:
    if not self.gz:
        self.fd.seek(0)
        self.gz = gzip.GzipFile(fileobj=self.fd, mode='r')
    count = 0
    step = 1024
    result = ''
    ## Copy ourself into the file - This is in case we have errors
    ## in the file, we try to read as much as possible:
    while 1:
        try:
            data=self.gz.read(step)
        except IOError,e:
            ## On a read error, halve the chunk size and retry; give
            ## up once the step drops below 10 bytes:
            step /= 2
            if step<10:
                pyflaglog.log(pyflaglog.DEBUG, "Error reading from %s(%s), could only get %s bytes (wanted %s/%s)" % (self.fd.inode, e, count, length,self.size));
                break
            else:
                continue
        except Exception, e:
            #pyflaglog.log(pyflaglog.WARNING, "Unable to decompress inode (%s) %s" % (self.inode, e))
            break
    ## NOTE(review): the success path never accumulates `data` into
    ## `result`, advances `count`, or returns anything - the rest of
    ## this function appears truncated in this view; confirm against
    ## the full source before relying on this block.
def read(self, len=None):
    """Read up to `len` bytes from the in-memory data buffer.

    `len` shadows the builtin but is kept for interface
    compatibility with the other file drivers in this module.
    """
    ## Call our baseclass to see if we have cached data:
    try:
        return File.read(self, len)
    except IOError:
        pass

    ## Explicit None test: the previous `if len:` check made read(0)
    ## return the whole buffer instead of the empty string.
    if len is not None:
        temp = self.data[self.readptr:self.readptr + len]
        self.readptr += len
        return temp

    ## NOTE(review): the read-all path does not advance readptr, so
    ## repeated read() calls return the data again - confirm callers
    ## rely on this before changing it.
    return self.data
def read(self, length=None):
    ## Decompress up to `length` bytes of a zip archive member
    ## (Python 2 code).
    ## Call our baseclass to see if we have cached data:
    try:
        return File.read(self, length)
    except IOError:
        pass
    ## Read as much as possible
    if length == None:
        length = sys.maxint
    ## This is done in order to decompress the file in small
    ## chunks. We try to return as much data as was required
    ## and not much more
    try:
        ## Consume the data left over from previous reads
        result = self.left_over[:length]
        self.left_over = self.left_over[length:]
        ## We keep reading compressed data until we can satify
        ## the desired length
        while len(result) < length and self.clength > 0:
            ## Read up to 1k of the file:
            available_clength = min(self.blocksize, self.clength)
            cdata = self.fd.read(available_clength)
            self.clength -= available_clength
            if self.type == Zip.ZIP_DEFLATED:
                ## Now Decompress that:
                try:
                    ddata = self.d.decompress(cdata)
                except:
                    ddata = ''
            elif self.type == Zip.ZIP_STORED:
                ## Stored members are kept verbatim:
                ddata = cdata
            else:
                raise RuntimeError(
                    "Compression method %s is not supported" % self.type)
            ## How much data do we require?
            required_length = length - len(result)
            result += ddata[:required_length]
            ## This will be '' if we have not finished making
            ## up the result, and store the rest for next time
            ## if we have
            self.left_over = ddata[required_length:]
    except (IndexError, KeyError, zipfile.BadZipfile), e:
        raise IOError("Zip_File: (%s)" % e)
    ## NOTE(review): `result` is built but never returned and
    ## readptr is not advanced - the tail of this function appears
    ## truncated in this view; confirm against the full source.
def read(self,length=None):
    ## Decompress up to `length` bytes of a zip archive member
    ## (Python 2 code) - compact twin of the formatted version above.
    ## Call our baseclass to see if we have cached data:
    try:
        return File.read(self,length)
    except IOError:
        pass
    ## Read as much as possible
    if length==None:
        length = sys.maxint
    ## This is done in order to decompress the file in small
    ## chunks. We try to return as much data as was required
    ## and not much more
    try:
        ## Consume the data left over from previous reads
        result = self.left_over[:length]
        self.left_over=self.left_over[length:]
        ## We keep reading compressed data until we can satify
        ## the desired length
        while len(result)<length and self.clength>0:
            ## Read up to 1k of the file:
            available_clength = min(self.blocksize,self.clength)
            cdata = self.fd.read(available_clength)
            self.clength -= available_clength
            if self.type == Zip.ZIP_DEFLATED:
                ## Now Decompress that:
                try:
                    ddata = self.d.decompress(cdata)
                except:
                    ddata = ''
            elif self.type == Zip.ZIP_STORED:
                ## Stored members are kept verbatim:
                ddata = cdata
            else:
                raise RuntimeError("Compression method %s is not supported" % self.type)
            ## How much data do we require?
            required_length = length - len(result)
            result += ddata[:required_length]
            ## This will be '' if we have not finished making
            ## up the result, and store the rest for next time
            ## if we have
            self.left_over = ddata[required_length:]
    except (IndexError, KeyError, zipfile.BadZipfile),e:
        raise IOError("Zip_File: (%s)" % e)
    ## NOTE(review): `result` is built but never returned and
    ## readptr is not advanced - the tail of this function appears
    ## truncated in this view; confirm against the full source.
def read(self, length=None):
    """Return the rendered contents of a PST item.

    With no attachment number the item's properties are formatted as
    text; otherwise the raw body of the requested attachment is
    returned.  Updates self.size as a side effect.
    """
    ## Call our baseclass to see if we have cached data:
    try:
        return File.read(self, length)
    except IOError:
        pass

    item = self.pst.get_item(self.item_id)
    properties = item.properties()

    ## Identity test is the Python idiom for None checks (was
    ## `== None`); behaviour is unchanged:
    if self.attach_number is None:
        result = format_properties(properties)
    else:
        attachment = properties['_attachments'][self.attach_number]
        result = attachment['body']

    ## NOTE(review): `length` is ignored on this path and readptr is
    ## never advanced - confirm callers expect the full item back.
    self.size = len(result)
    return result
def read(self, length=None):
    """Hand this PST item back rendered as a string.

    Either the formatted property listing (no attachment selected)
    or the raw body of the selected attachment.  self.size is
    refreshed to match what is returned.
    """
    ## Cached copy first:
    try:
        return File.read(self, length)
    except IOError:
        pass

    properties = self.pst.get_item(self.item_id).properties()

    if self.attach_number == None:
        result = format_properties(properties)
    else:
        result = properties['_attachments'][self.attach_number]['body']

    self.size = len(result)
    return result
def read(self,length=None): try: return File.read(self, length) except IOError,e: pass