def test_framed_message_exceeds_max_size(self):
    parser = SyslogFrameParser(10)
    message = StringIO()
    message.write("11 hello world")
    message.seek(0)
    self.assertRaises(RequestSizeExceeded, lambda: parser.parse_request(message, 32))
def next(self):
    resultBuilder = StringIO()
    # the for statement advances the loop variable itself, so no manual
    # increment is needed
    for i in range(0, self.length):
        pos = self.rv.randint(0, len(self.alphabet) - 1)
        resultBuilder.write(self.alphabet[pos])
    return resultBuilder.getvalue()
def sanitize_metadata(stream, content_type, strip_metadata):
    text_plain = content_type == 'text/plain'
    s = None
    t = None
    clean_file = False

    if strip_metadata and not text_plain:
        t = tempfile.NamedTemporaryFile(delete=False)
        copyfileobj(stream, t)
        t.flush()
        file_meta = metadata_handler(t.name)

        if not file_meta.is_clean():
            file_meta.remove_all()
            f = open(t.name)
            s = StringIO()
            s.write(f.read())
            f.close()
            s.reset()
            secure_unlink(t.name, do_verify=False)
            t.close()
        else:
            secure_unlink(t.name, do_verify=False)
            t.close()

    return s
def getxml(self):
    # see if there were any template strings loaded from the db,
    # {label:value}
    templateOverrides = rhnFlags.get('templateOverrides')
    # update the templateValues in the module
    if templateOverrides:
        for label in templateOverrides.keys():
            # only care about values we've defined defaults for...
            if label in templateValues:
                templateValues[label] = templateOverrides[label]

    s = StringIO()
    s.write("\n")
    if self.text:
        s.write(_("Error Message:\n %s\n") % self.text.strip())
    if self.code:
        s.write(_("Error Class Code: %s\n") % self.code)
    if self.arrayText:
        cinfo = self.arrayText % templateValues
        s.write(_("Error Class Info: %s\n") % cinfo.rstrip())
    if self.explain:
        s.write(_("Explanation: %s") % Explain)
    if not self.code:
        return xmlrpclib.Fault(1, s.getvalue())
    return xmlrpclib.Fault(-self.code, s.getvalue())
def readline(self, size=-1):
    if not self._is_socket:
        return self.tmp.readline()

    orig_size = self._tmp_size()
    if self.tmp.tell() == orig_size:
        if not self._tee(self.CHUNK_SIZE):
            return ''
        self.tmp.seek(orig_size)

    # now we can get line
    line = self.tmp.readline()
    if line.find("\n") >= 0:
        return line

    buf = StringIO()
    buf.write(line)
    while True:
        orig_size = self.tmp.tell()
        data = self._tee(self.CHUNK_SIZE)
        if not data:
            break
        self.tmp.seek(orig_size)
        buf.write(self.tmp.readline())
        if data.find("\n") >= 0:
            break
    return buf.getvalue()
def get_data(im):
    s = StringIO()
    if im.format != 'DIB':
        im.save(s, im.format)
    else:
        s.write(im.buf)
    s.seek(0)
    if im.format == 'BMP':
        bmp_f = s
        bmp_f.seek(10)
        offset = i32(bmp_f.read(4))
        dib_size = i32(bmp_f.read(4))
        dib = o32(dib_size) + bytearray(bmp_f.read(36))
        dib[:4] = o32(40)
        dib[8:12] = o32(i32(str(dib[8:12])) * 2)
        dib[16:20] = o32(0)
        dib = dib[:40]
        bmp_f.seek(offset)
        data = bytearray(bmp_f.read())
        data = dib + data
    else:
        data = bytearray(s.read())
    return data
def _create_stringo(value, position, closed):
    f = StringIO()
    if closed:
        f.close()
    else:
        f.write(value)
        f.seek(position)
    return f
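# A minimal usage sketch for _create_stringo above (hypothetical, not from the
# original source): it assumes the function is the rebuild half of a pickle/copy
# reducer that captured a StringIO's contents, cursor position, and closed flag.
src = StringIO()
src.write("hello world")
src.seek(5)
clone = _create_stringo(src.getvalue(), src.tell(), False)
assert clone.getvalue() == "hello world" and clone.tell() == 5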
def __init__(self, cr, name, table, rml=False, parser=False, header=True, store=False):
    super(Aeroo_report, self).__init__(name, table, rml, parser, header, store)
    self.logger("registering %s (%s)" % (name, table), logging.INFO)
    self.active_prints = {}

    pool = pooler.get_pool(cr.dbname)
    ir_obj = pool.get('ir.actions.report.xml')
    name = name.startswith('report.') and name[7:] or name
    try:
        report_xml_ids = ir_obj.search(cr, 1, [('report_name', '=', name)])
        if report_xml_ids:
            report_xml = ir_obj.browse(cr, 1, report_xml_ids[0])
        else:
            report_xml = False

        if report_xml and report_xml.preload_mode == 'preload':
            file_data = report_xml.report_sxw_content
            if not file_data:
                self.logger("template is not defined in %s (%s) !" % (name, table), logging.WARNING)
                template_io = None
            else:
                template_io = StringIO()
                template_io.write(base64.decodestring(file_data))
            style_io = self.get_styles_file(cr, 1, report_xml)
            if template_io:
                self.serializer = OOSerializer(template_io, oo_styles=style_io)
    except Exception, e:
        print e
def create_aeroo_report(self, cr, uid, ids, data, report_xml, context=None, output='odt'): """ Returns an aeroo report generated with aeroolib """ pool = pooler.get_pool(cr.dbname) if not context: context={} context = context.copy() if self.name=='report.printscreen.list': context['model'] = data['model'] context['ids'] = ids print_id = context.get('print_id', False) aeroo_print = self.active_prints[print_id] # Aeroo print object aeroo_print.subreports = [] #self.oo_subreports[print_id] = [] objects = self.getObjects_mod(cr, uid, ids, report_xml.report_type, context) or [] oo_parser = self.parser(cr, uid, self.name2, context=context) oo_parser.localcontext.update(context) oo_parser.set_context(objects, data, ids, report_xml.report_type) self.set_xml_data_fields(objects, oo_parser) # Get/Set XML oo_parser.localcontext['data'] = data oo_parser.localcontext['user_lang'] = context.get('lang', False) if len(objects)>0: oo_parser.localcontext['o'] = objects[0] xfunc = ExtraFunctions(cr, uid, report_xml.id, oo_parser.localcontext) oo_parser.localcontext.update(xfunc.functions) #company_id = objects and 'company_id' in objects[0]._table._columns.keys() and \ # objects[0].company_id and objects[0].company_id.id or False # for object company usage company_id = False style_io=self.get_styles_file(cr, uid, report_xml, company=company_id, context=context) if report_xml.tml_source in ('file', 'database'): if not report_xml.report_sxw_content or report_xml.report_sxw_content=='False': raise osv.except_osv(_('Error!'), _('No template found!')) file_data = base64.decodestring(report_xml.report_sxw_content) else: file_data = self.get_other_template(cr, uid, data, oo_parser) if not file_data and not report_xml.report_sxw_content: self.logger("End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time), logging.INFO) # debug mode return False, output #elif file_data: # template_io = StringIO() # template_io.write(file_data or report_xml.report_sxw_content) # basic = Template(source=template_io, styles=style_io) else: if report_xml.preload_mode == 'preload' and hasattr(self, 'serializer'): serializer = copy.copy(self.serializer) serializer.apply_style(style_io) template_io = serializer.template else: template_io = StringIO() template_io.write(file_data or base64.decodestring(report_xml.report_sxw_content) ) serializer = OOSerializer(template_io, oo_styles=style_io) try: basic = Template(source=template_io, serializer=serializer) except Exception, e: self._raise_exception(e, print_id)
def do_POST(self):
    """Serve a POST request."""
    if not self.authenticate():
        return
    r, info, meta = self.deal_post_data()
    res = 'Success' if r else 'Failure'
    log.info("Upload {} {} by: {}".format(res, info, self.client_address))
    f = StringIO()
    ref = self.headers.get('referer', 'None')
    response = {'result': res, 'referer': ref, 'info': info}
    result = """
    <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
    <html><title>Upload Result Page</title>
    <body><h2>Upload Result Page</h2>
    <hr>
    <strong>{result}:</strong> {info}
    <br><a href="{referer}">back</a>
    </body></html>
    """
    f.write(result.format(**response))
    length = f.tell()
    f.seek(0)
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.send_header("Content-Length", str(length))
    self.end_headers()
    if f:
        self.copyfile(f, self.wfile)
        f.close()
def build_cmap(string):
    length = len(string)
    buffer = StringIO()
    buffer.seek(0)
    base_stream = Base(buffer)
    buffer.write("CMAP")
    base_stream.WriteBEUInt32(length * 4 + 18)
    base_stream.WriteBEUInt16(0)
    base_stream.WriteBEUInt16(0XFFFF)
    base_stream.WriteBEUInt16(2)
    base_stream.WriteBEUInt16(0)
    base_stream.WriteBEUInt32(0)
    base_stream.WriteBEUInt16(length)
    for i in xrange(length):
        char = string[i]
        uchar = struct.unpack("H", char.encode("utf-16")[2:])[0]
        base_stream.WriteBEUInt16(uchar)
        base_stream.WriteBEUInt16(i)
    buffer.seek(0, 2)
    end_pos = buffer.tell()
    print(hex(end_pos))
    if end_pos % 4 != 0:
        buffer.write("\x00" * (0x4 - end_pos % 4))
    buffer.seek(0, 2)
    end_pos = buffer.tell()
    buffer.seek(4)
    base_stream.WriteBEUInt32(end_pos)
    cmapdata = buffer.getvalue()
    return cmapdata
def tricks_generate_yaml(args):
    """
    Subcommand to generate Yaml configuration for tricks named on the command line.

    :param args:
        Command line argument options.
    """
    python_paths = path_split(args.python_path)
    add_to_sys_path(python_paths)
    output = StringIO()

    for trick_path in args.trick_paths:
        TrickClass = load_class(trick_path)
        output.write(TrickClass.generate_yaml())

    content = output.getvalue()
    output.close()

    header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
    header += "%s:\n" % CONFIG_KEY_TRICKS

    if args.append_to_file is None:
        # Output to standard output.
        if not args.append_only:
            content = header + content
        sys.stdout.write(content)
    else:
        if not os.path.exists(args.append_to_file):
            content = header + content
        with open(args.append_to_file, 'ab') as output:
            output.write(content)
def getFile(self, site, inner_path):
    # Use streamFile if client supports it
    if config.stream_downloads and self.connection and self.connection.handshake and self.connection.handshake["rev"] > 310:
        return self.streamFile(site, inner_path)

    location = 0
    if config.use_tempfiles:
        buff = tempfile.SpooledTemporaryFile(max_size=16 * 1024, mode='w+b')
    else:
        buff = StringIO()

    s = time.time()
    while True:  # Read in 512k parts
        res = self.request("getFile", {"site": site, "inner_path": inner_path, "location": location})

        if not res or "body" not in res:  # Error
            return False

        buff.write(res["body"])
        res["body"] = None  # Save memory
        if res["location"] == res["size"]:  # End of file
            break
        else:
            location = res["location"]

    self.download_bytes += res["location"]
    self.download_time += (time.time() - s)
    if self.site:
        self.site.settings["bytes_recv"] = self.site.settings.get("bytes_recv", 0) + res["location"]
    buff.seek(0)
    return buff
def read(self, fh, offset, length):
    """Read `length` bytes from `fh` at position `offset`

    Unless EOF is reached, returns exactly `length` bytes. This method
    releases the global lock while it is running.
    """
    buf = StringIO()
    inode = self.inodes[fh]

    # Make sure that we don't read beyond the file size. This
    # should not happen unless direct_io is activated, but it's
    # cheap and nice for testing.
    size = inode.size
    length = min(size - offset, length)

    while length > 0:
        tmp = self._read(fh, offset, length)
        buf.write(tmp)
        length -= len(tmp)
        offset += len(tmp)

    # Inode may have expired from cache
    inode = self.inodes[fh]

    if inode.atime < inode.ctime or inode.atime < inode.mtime:
        inode.atime = time.time()

    return buf.getvalue()
def _read(self, name):
    memory_file = StringIO()
    try:
        o = self.client.get(self.prefix, name)
        memory_file.write(o.data)
    except sae.storage.ObjectNotExistsError, e:
        # FIXME: Check here later
        pass
    # presumably the populated buffer is meant to be handed back to the caller;
    # the original snippet ended without a return statement
    return memory_file
def formatDefinitions(options, COLS):
    s = StringIO()
    indent = " " * 10
    width = COLS - 11

    if width < 15:
        width = COLS - 2
        indent = " "

    for (longname, default, doc) in options:
        if doc == '':
            continue
        s.write('--' + longname + ' <arg>\n')
        if default is not None:
            doc += ' (defaults to ' + repr(default) + ')'
        i = 0
        for word in doc.split():
            if i == 0:
                s.write(indent + word)
                i = len(word)
            elif i + len(word) >= width:
                s.write('\n' + indent + word)
                i = len(word)
            else:
                s.write(' ' + word)
                i += len(word) + 1
        s.write('\n\n')
    return s.getvalue()
def execute(self, server):
    ts = State(self.suite_ini, server.admin, TarantoolServer)
    cmd = None

    def send_command(command):
        result = ts.curcon[0](command, silent=True)
        for conn in ts.curcon[1:]:
            conn(command, silent=True)
        return result

    for line in open(self.name, 'r'):
        if not cmd:
            cmd = StringIO()
        if line.find('--#') == 0:
            rescom = cmd.getvalue().replace('\n\n', '\n')
            if rescom:
                sys.stdout.write(cmd.getvalue())
                result = send_command(rescom)
                sys.stdout.write(result.replace("\r\n", "\n"))
            sys.stdout.write(line)
            ts(line)
        elif line.find('--') == 0:
            sys.stdout.write(line)
        else:
            if line.strip() or cmd.getvalue():
                cmd.write(line)
            delim_len = -len(ts.delimiter) if len(ts.delimiter) else None
            if line.endswith(ts.delimiter + '\n') and cmd.getvalue().strip()[:delim_len].strip():
                sys.stdout.write(cmd.getvalue())
                rescom = cmd.getvalue()[:delim_len].replace('\n\n', '\n')
                result = send_command(rescom)
                sys.stdout.write(result.replace("\r\n", "\n"))
                cmd.close()
                cmd = None
    ts.flush()
def parse(self, unreader):
    buf = StringIO()

    self.get_data(unreader, buf, stop=True)

    # Request line
    idx = buf.getvalue().find("\r\n")
    while idx < 0:
        self.get_data(unreader, buf)
        idx = buf.getvalue().find("\r\n")
    self.parse_first_line(buf.getvalue()[:idx])
    rest = buf.getvalue()[idx + 2:]  # Skip \r\n

    buf.truncate(0)
    buf.write(rest)

    # Headers
    idx = buf.getvalue().find("\r\n\r\n")
    done = buf.getvalue()[:2] == "\r\n"
    while idx < 0 and not done:
        self.get_data(unreader, buf)
        idx = buf.getvalue().find("\r\n\r\n")
        done = buf.getvalue()[:2] == "\r\n"
    if done:
        self.unreader.unread(buf.getvalue()[2:])
        return ""
    self.headers = self.parse_headers(buf.getvalue()[:idx])

    ret = buf.getvalue()[idx + 4:]
    buf.truncate(0)
    return ret
def writer(self):
    if self.file_output:
        fio = open(self.file_output, 'wb')
    else:
        fio = StringIO()
    while True:
        try:
            item = self.output_q.get()
            if item == 'STOP':
                break
            msg = map(
                lambda x: type(x) == unicode and x.encode('utf-8') or x,
                item
            )
            fio.write(str(msg[0]) + '\n')
        except Exception:
            traceback.print_exc()
            if self.raven:
                self.raven.captureException()
        finally:
            self.output_q.task_done()
    fio.write('END')
    if not self.file_output:
        self.content = fio.getvalue()
    fio.close()
class TBufferedTransport(TTransportBase,CReadableTransport): """Class that wraps another transport and buffers its I/O. The implementation uses a (configurable) fixed-size read buffer but buffers all writes until a flush is performed. """ DEFAULT_BUFFER = 4096 def __init__(self, trans, rbuf_size = DEFAULT_BUFFER): self.__trans = trans self.__wbuf = StringIO() self.__rbuf = StringIO("") self.__rbuf_size = rbuf_size def isOpen(self): return self.__trans.isOpen() def open(self): return self.__trans.open() def close(self): return self.__trans.close() def read(self, sz): ret = self.__rbuf.read(sz) if len(ret) != 0: return ret self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size))) return self.__rbuf.read(sz) def write(self, buf): self.__wbuf.write(buf) def flush(self): out = self.__wbuf.getvalue() # reset wbuf before write/flush to preserve state on underlying failure self.__wbuf = StringIO() self.__trans.write(out) self.__trans.flush() # Implement the CReadableTransport interface. @property def cstringio_buf(self): return self.__rbuf def cstringio_refill(self, partialread, reqlen): retstring = partialread if reqlen < self.__rbuf_size: # try to make a read of as much as we can. retstring += self.__trans.read(self.__rbuf_size) # but make sure we do read reqlen bytes. if len(retstring) < reqlen: retstring += self.__trans.readAll(reqlen - len(retstring)) self.__rbuf = StringIO(retstring) return self.__rbuf
def install(self, plugin):
    directories = conf.supybot.directories.plugins()
    directory = self._getWritableDirectoryFromList(directories)
    assert directory is not None
    dirname = ''.join((self._path, plugin))

    fileObject = urllib2.urlopen(self._downloadUrl)
    fileObject2 = StringIO()
    fileObject2.write(fileObject.read())
    fileObject.close()
    fileObject2.seek(0)
    archive = tarfile.open(fileobj=fileObject2, mode='r:gz')
    prefix = archive.getnames()[0]
    try:
        assert archive.getmember(prefix + dirname).isdir()

        for file in archive.getmembers():
            if file.name.startswith(prefix + dirname):
                extractedFile = archive.extractfile(file)
                newFileName = os.path.join(*file.name.split('/')[1:])
                newFileName = newFileName[len(self._path)-1:]
                newFileName = os.path.join(directory, newFileName)
                if os.path.exists(newFileName):
                    assert os.path.isdir(newFileName)
                    shutil.rmtree(newFileName)
                if extractedFile is None:
                    os.mkdir(newFileName)
                else:
                    open(newFileName, 'a').write(extractedFile.read())
    finally:
        archive.close()
        fileObject2.close()
        del archive, fileObject, fileObject2
def dlexpect(self, pattern, flags=0, deadline=None):
    """Loop recv listening for a provided regular expression."""
    # pull default timeout
    if deadline is None:
        deadline = self._deadline
    if deadline < 1000:
        deadline += time()

    buff = StringIO()
    # loop until pattern has been found
    while not pattern.search(buff.getvalue()):
        # wait for data on the socket
        t = time()
        timeout = (deadline - t) if (deadline - t > 0) else 0.0
        if len(select([self], [], [], timeout)[0]) == 0:
            # deadline reached, terminate
            return ''

        # append response to buffer
        p = buff.tell()
        try:
            buff.write(self.recv(100, flags))
        except socket.error, e:
            raise MythError(MythError.SOCKET, e.args)
        if buff.tell() == p:
            # no data read from a 'ready' socket, connection terminated
            raise MythError(MythError.CLOSEDSOCKET)

        if timeout == 0:
            break
def read(self, nbytes=None):
    """
    Read up to specified amount from buffer, or whatever is available.
    """
    # flush existing buffer
    self._rollback_pool = []
    data = StringIO()
    while True:
        try:
            # get first item, or return if no more blocks are available
            tmp = self._buffer[0]
        except IndexError:
            break
        if nbytes:
            # read only what is requested
            data.write(tmp.read(nbytes - data.tell()))
        else:
            # read all that is available
            data.write(tmp.read(tmp.blocksize))
        if tmp.EOF:
            # block is exhausted, cycle it into the rollback pool
            self._rollback_pool.append(self._buffer.popleft())
        else:
            # end of data or request reached, return
            break
    self._nbytes += data.tell()
    return data.getvalue()
def _produce_request(self, topic, messages, partition):
    message_set_buffer = StringIO()
    for message in messages:
        # <<int:1, int:4, str>>
        encoded_message = struct.pack('>Bi{0}s'.format(len(message)),
            MAGIC_BYTE,
            self.compute_checksum(message),
            message
        )
        message_size = len(encoded_message)
        bin_format = '>i{0}s'.format(message_size)
        message_set_buffer.write(struct.pack(bin_format, message_size, encoded_message))

    message_set = message_set_buffer.getvalue()

    # create the request <<uint:4, uint:2, uint:2, str, uint:4, uint:4, str>>
    request = (
        PRODUCE_REQUEST,
        len(topic),
        topic,
        partition,
        len(message_set),
        message_set
    )
    data = struct.pack('>HH{0}sII{1}s'.format(len(topic), len(message_set)),
        *request
    )
    request_size = len(data)
    bin_format = '<<uint:4, uint:2, uint:2, str:{0}, uint:4, uint:4, str:{1}>>'.format(len(topic), len(message_set))

    kafka_log.debug('produce request: {0} in format {1} ({2} bytes)'.format(request, bin_format, request_size))
    return struct.pack('>I{0}s'.format(request_size), request_size, data)
def download_file(self, **kargs):
    file_id = kargs['file_id']
    Model = request.session.model('abc_ipt.com_file')
    files = Model.search_read([('id', '=', file_id)])
    if files:
        file = files[0]
    else:
        return 'File not found'
    tmp_file_name = file['tmp_file_name']
    file_name = file['file_name']
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    filepath = path.replace('\\', '/') + '/myaddons/abc_ipt/com_files/{}'.format(tmp_file_name)
    try:
        # the default open() mode is 'r' (read-only text); open in binary mode here
        with open(filepath, 'rb') as f:
            contents = f.read()
    except Exception as e:
        _logger = logging.getLogger(__name__)
        _logger.error(str(e))
        return 'Failed to read the file'
    fo = StringIO()
    fo.write(contents)
    fo.seek(0)
    data = fo.read()
    fo.close()
    return request.make_response(
        data,
        headers=[('Content-Disposition', content_disposition(file_name)),
                 ('Content-Type', 'application/octet-stream')],
    )
class State5380(State):
    """
    This is another state machine itself
    """
    def __init__(self, filename):
        self.file_pattern = "%s_%%d" % (filename)
        self.index = 0
        self.file_str = StringIO()

    def next_state(self, line, lastLine):
        self.file_str.write(lastLine)
        code = line[0:4]
        if code == "5880":
            # Found end of file
            self.file_str.write(line)
            curr_filename = self.file_pattern % self.index
            sys.stdout.write("Writing %s\n" % curr_filename)
            # Truncate file if already exists
            out_file = open(curr_filename, 'w')
            out_file.write(self.file_str.getvalue())
            out_file.close()
            self.file_str = StringIO()
            self.index = self.index + 1
            return STATES["5880"]
        return STATES["5380"]
def visit_Const(self, node):
    if node.value is None:
        return "null"
    if isinstance(node.value, bool):
        return str(node.value).lower()
    if isinstance(node.value, (int, float, long)):
        return str(node.value)

    output = StringIO()
    if self.paramming:
        output.write("\\")
    output.write("'")
    value = node.value.replace("\n", "\\n")
    value = value.replace("\t", "\\t")
    value = value.replace("\r", "\\r")
    value = value.replace("'", "\\'")
    output.write(value)
    if self.paramming:
        output.write("\\")
    output.write("'")
    return output.getvalue()
def dlrecv(self, bufsize, flags=0, deadline=None):
    # pull default timeout
    if deadline is None:
        deadline = self._deadline
    if deadline < 1000:
        deadline += time()

    buff = StringIO()
    # loop until necessary data has been received
    while bufsize > buff.tell():
        # wait for data on the socket
        t = time()
        timeout = (deadline - t) if (deadline - t > 0) else 0.0
        if len(select([self], [], [], timeout)[0]) == 0:
            # deadline reached, terminate
            return u''

        # append response to buffer
        p = buff.tell()
        try:
            buff.write(self.recv(bufsize - buff.tell(), flags))
        except socket.error, e:
            raise MythError(MythError.SOCKET, e.args)
        if buff.tell() == p:
            # no data read from a 'ready' socket, connection terminated
            raise MythError(MythError.SOCKET, (54, 'Connection reset by peer'))

        if timeout == 0:
            break
def fetch_url(url, opener=None, timeout=60.0, chunk_size=16384):
    if opener is None:
        opener = urllib2.build_opener()

    try:
        output = StringIO()
        fileobj = yield idiokit.thread(opener.open, url, timeout=timeout)

        try:
            while True:
                data = yield idiokit.thread(fileobj.read, chunk_size)
                if not data:
                    break
                output.write(data)
        finally:
            fileobj.close()

        info = fileobj.info()
        info = email.parser.Parser().parsestr(str(info), headersonly=True)

        output.seek(0)
        idiokit.stop(info, output)
    except urllib2.HTTPError as he:
        raise HTTPError(he.code, he.msg, he.hdrs, he.fp)
    except urllib2.URLError as error:
        if _is_timeout(error.reason):
            raise FetchUrlTimeout("fetching URL timed out")
        raise FetchUrlFailed(str(error))
    except socket.error as error:
        if _is_timeout(error):
            raise FetchUrlTimeout("fetching URL timed out")
        raise FetchUrlFailed(str(error))
    except httplib.HTTPException as error:
        raise FetchUrlFailed(str(error))
class MockRecvServer(threading.Thread):
    """
    Single threaded server accepts one connection and recv until EOF.
    """
    def __init__(self, port):
        self._sock = socket.socket()
        self._sock.bind(('localhost', port))
        self._buf = BytesIO()
        threading.Thread.__init__(self)
        self.start()

    def run(self):
        s = self._sock
        s.listen(1)
        con, _ = s.accept()
        while True:
            d = con.recv(4096)
            if not d:
                break
            self._buf.write(d)
        con.close()
        s.close()
        self._sock = None

    def wait(self):
        while self._sock:
            time.sleep(0.1)

    def get_recieved(self):
        self.wait()
        self._buf.seek(0)
        # TODO: have to process string encoding properly.
        # currently we assume that all encoding is utf-8.
        return list(Unpacker(self._buf, encoding='utf-8'))
def toXML(self): x = StringIO() x.write(L(2, "<disk type='%s'>" % self._type)) if self.driver: # file is ignored at the moment if self.driver == "tap:aio": x.write(L(3, "<driver name='tap' type='aio' />")) if self._type == "block": x.write(L(3, "<source dev='%s' />" % self.source)) else: x.write(L(3, "<source file='%s' />" % self.source)) x.write(L(3, "<target dev='%s' />" % self.target)) if self.readonly: x.write(L(3, "<readonly/>")) x.write(L(2, "</disk>")) content = x.getvalue() x.close() return content
def encode(self):
    buf = BytesIO()
    buf.write(pause_cmd._get_packed_fingerprint())
    self._encode_one(buf)
    return buf.getvalue()
def encode(self):
    buf = BytesIO()
    buf.write(message_received_t._get_packed_fingerprint())
    self._encode_one(buf)
    return buf.getvalue()
def templatize(src): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. """ from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK out = StringIO() intrans = False inplural = False singular = [] plural = [] for t in Lexer(src, None).tokenize(): if intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: raise SyntaxError( "Translation blocks must not include other block tags: %s" % t.contents) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: contents = t.contents.replace('%', '%%') if inplural: plural.append(contents) else: singular.append(contents) else: if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = g.strip('"') elif g[0] == "'": g = g.strip("'") out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':', 1)[1]) else: out.write(blankout(p, 'F')) else: out.write(blankout(t.contents, 'X')) return out.getvalue()
class THttpClient(TTransportBase): """Http implementation of TTransport base.""" def __init__(self, uri_or_host, port=None, path=None): """THttpClient supports two different types constructor parameters. THttpClient(host, port, path) - deprecated THttpClient(uri) Only the second supports https. """ if port is not None: warnings.warn( "Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2) self.host = uri_or_host self.port = port assert path self.path = path self.scheme = 'http' else: parsed = urlparse.urlparse(uri_or_host) self.scheme = parsed.scheme assert self.scheme in ('http', 'https') if self.scheme == 'http': self.port = parsed.port or httplib.HTTP_PORT elif self.scheme == 'https': self.port = parsed.port or httplib.HTTPS_PORT self.host = parsed.hostname self.path = parsed.path if parsed.query: self.path += '?%s' % parsed.query self.__wbuf = StringIO() self.__http = None self.__timeout = None self.__custom_headers = None def open(self): if self.scheme == 'http': self.__http = httplib.HTTP(self.host, self.port) else: self.__http = httplib.HTTPS(self.host, self.port) def close(self): self.__http.close() self.__http = None def isOpen(self): return self.__http is not None def setTimeout(self, ms): if not hasattr(socket, 'getdefaulttimeout'): raise NotImplementedError if ms is None: self.__timeout = None else: self.__timeout = ms / 1000.0 def setCustomHeaders(self, headers): self.__custom_headers = headers def read(self, sz): return self.__http.file.read(sz) def write(self, buf): self.__wbuf.write(buf) def __withTimeout(f): def _f(*args, **kwargs): orig_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(args[0].__timeout) result = f(*args, **kwargs) socket.setdefaulttimeout(orig_timeout) return result return _f def flush(self): if self.isOpen(): self.close() self.open() # Pull data out of buffer data = self.__wbuf.getvalue() self.__wbuf = StringIO() # HTTP request self.__http.putrequest('POST', self.path) # Write headers self.__http.putheader('Host', self.host) self.__http.putheader('Content-Type', 'application/x-thrift') self.__http.putheader('Content-Length', str(len(data))) if not self.__custom_headers or 'User-Agent' not in self.__custom_headers: user_agent = 'Python/THttpClient' script = os.path.basename(sys.argv[0]) if script: user_agent = '%s (%s)' % (user_agent, urllib.quote(script)) self.__http.putheader('User-Agent', user_agent) if self.__custom_headers: for key, val in self.__custom_headers.iteritems(): self.__http.putheader(key, val) self.__http.endheaders() # Write payload self.__http.send(data) # Get reply to flush the request self.code, self.message, self.headers = self.__http.getreply() # Decorate if we know how to timeout if hasattr(socket, 'getdefaulttimeout'): flush = __withTimeout(flush)
def unpack_and_upload(iterator, symbols_upload, bucket_name, bucket_location): necessary_setting_keys = ( 'AWS_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY', 'SYMBOLS_BUCKET_DEFAULT_LOCATION', 'SYMBOLS_BUCKET_DEFAULT_NAME', 'SYMBOLS_FILE_PREFIX', ) for key in necessary_setting_keys: if not getattr(settings, key): raise ImproperlyConfigured("Setting %s must be set" % key) conn = boto.connect_s3( settings.AWS_ACCESS_KEY, settings.AWS_SECRET_ACCESS_KEY, # Deliberately commented out until we know a better way to do # this. When connecting to S3 on a Python 2.7 on OSX, you can't # get buckets that dots in the name. But applying this calling_format # thing breaks on our Python 2.7 on production. # So it's commented out, in a rush, until we discover a unified # way of dealing with this on local dev environments as well # as in production. # calling_format=boto.s3.connection.OrdinaryCallingFormat(), ) assert bucket_name bucket = conn.lookup(bucket_name) if bucket is None: try: bucket = conn.create_bucket(bucket_name, location=bucket_location) except AttributeError as exception: # This extra exception trap is temporary until we can figure # out why sometimes we get AttributeErrors here. raise AttributeError('%s (bucket_name=%r, bucket_location=%r)' % (exception, bucket_name, bucket_location)) total_uploaded = 0 for member in iterator: key_name = os.path.join(settings.SYMBOLS_FILE_PREFIX, member.name) key = bucket.get_key(key_name) # let's assume first that we need to add a new key prefix = '+' if key: # key already exists, but is it the same size? if key.size != member.size: # file size in S3 is different, upload the new one key = None else: prefix = '=' if not key: key = bucket.new_key(key_name) file = StringIO() file.write(member.extractor().read()) content_type = mimetypes.guess_type(key_name)[0] # default guess for ext in settings.SYMBOLS_MIME_OVERRIDES: if key_name.lower().endswith('.{0}'.format(ext)): content_type = settings.SYMBOLS_MIME_OVERRIDES[ext] key.content_type = content_type symbols_upload.content_type = key.content_type compress = False for ext in settings.SYMBOLS_COMPRESS_EXTENSIONS: if key_name.lower().endswith('.{0}'.format(ext)): compress = True break headers = { 'Content-Type': content_type, } if compress: headers['Content-Encoding'] = 'gzip' out = StringIO() with gzip.GzipFile(fileobj=out, mode='w') as f: f.write(file.getvalue()) value = out.getvalue() else: value = file.getvalue() uploaded = key.set_contents_from_string(value, headers) total_uploaded += uploaded symbols_upload.content += '%s%s,%s\n' % (prefix, key.bucket.name, key.key) symbols_upload.save() return total_uploaded
def toXML(self): x = StringIO() x.write(L(2, "<interface type='%s'>" % self._type)) x.write(L(3, "<source bridge='%s' />" % self.source)) x.write(L(3, "<mac address='%s' />" % self.mac)) if self.target: x.write(L(3, "<target dev='%s' />" % self.target)) if self.script_path: x.write(L(3, "<script path='%s' />" % self.script_path)) x.write(L(2, "</interface>")) content = x.getvalue() x.close() return content
def explore(): """ Returns a gallery consisting of the images of one of the dbs """ job = job_from_request() # Get LMDB db = flask.request.args.get('db', 'train') if 'train' in db.lower(): task = job.train_db_task() elif 'val' in db.lower(): task = job.val_db_task() elif 'test' in db.lower(): task = job.test_db_task() if task is None: raise ValueError('No create_db task for {0}'.format(db)) if task.status != 'D': raise ValueError( "This create_db task's status should be 'D' but is '{0}'".format( task.status)) if task.backend != 'lmdb': raise ValueError( "Backend is {0} while expected backend is lmdb".format( task.backend)) db_path = job.path(task.db_name) labels = task.get_labels() page = int(flask.request.args.get('page', 0)) size = int(flask.request.args.get('size', 25)) label = flask.request.args.get('label', None) if label is not None: try: label = int(label) except ValueError: label = None reader = DbReader(db_path) count = 0 imgs = [] min_page = max(0, page - 5) if label is None: total_entries = reader.total_entries else: total_entries = task.distribution[str(label)] max_page = min((total_entries - 1) / size, page + 5) pages = range(min_page, max_page + 1) for key, value in reader.entries(): if count >= page * size: datum = caffe_pb2.Datum() datum.ParseFromString(value) if label is None or datum.label == label: if datum.encoded: s = StringIO() s.write(datum.data) s.seek(0) img = PIL.Image.open(s) else: import caffe.io arr = caffe.io.datum_to_array(datum) # CHW -> HWC arr = arr.transpose((1, 2, 0)) if arr.shape[2] == 1: # HWC -> HW arr = arr[:, :, 0] elif arr.shape[2] == 3: # BGR -> RGB # XXX see issue #59 arr = arr[:, :, [2, 1, 0]] img = PIL.Image.fromarray(arr) imgs.append({ "label": labels[datum.label], "b64": utils.image.embed_image_html(img) }) if label is None: count += 1 else: datum = caffe_pb2.Datum() datum.ParseFromString(value) if datum.label == int(label): count += 1 if len(imgs) >= size: break return flask.render_template('datasets/images/explore.html', page=page, size=size, job=job, imgs=imgs, labels=labels, pages=pages, label=label, total_entries=total_entries, db=db)
def print_runner_summary(runner_results, junit_results, runner_name='ROSUNIT'):
    """
    Print summary of runner results and actual test results to stdout.
    For rosunit and rostest, the test is wrapped in an external runner.
    The results from this runner are important if the runner itself has a failure.

    @param runner_result: unittest runner result object
    @type  runner_result: _XMLTestResult
    @param junit_results: Parsed JUnit test results
    @type  junit_results: rosunit.junitxml.Result
    """
    # we have two separate result objects, which can be a bit
    # confusing. 'result' counts successful _running_ of tests
    # (i.e. doesn't check for actual test success). The 'r' result
    # object contains results of the actual tests.
    buff = StringIO()
    buff.write("[%s]" % (runner_name) + '-' * 71 + '\n\n')
    for tc_result in junit_results.test_case_results:
        buff.write(tc_result.description)
    for tc_result in runner_results.failures:
        buff.write("[%s][failed]\n" % tc_result[0]._testMethodName)

    buff.write('\nSUMMARY\n')
    if runner_results.wasSuccessful() and (junit_results.num_errors + junit_results.num_failures) == 0:
        buff.write("\033[32m * RESULT: SUCCESS\033[0m\n")
    else:
        buff.write("\033[1;31m * RESULT: FAIL\033[0m\n")

    # TODO: still some issues with the numbers adding up if tests fail to launch

    # number of errors from the inner tests, plus add in count for tests
    # that didn't run properly ('result' object).
    buff.write(" * TESTS: %s\n" % junit_results.num_tests)
    num_errors = junit_results.num_errors + len(runner_results.errors)
    if num_errors:
        buff.write("\033[1;31m * ERRORS: %s\033[0m\n" % num_errors)
    else:
        buff.write(" * ERRORS: 0\n")
    num_failures = junit_results.num_failures + len(runner_results.failures)
    if num_failures:
        buff.write("\033[1;31m * FAILURES: %s\033[0m\n" % num_failures)
    else:
        buff.write(" * FAILURES: 0\n")

    if runner_results.failures:
        buff.write("\nERROR: The following tests failed to run:\n")
        for tc_result in runner_results.failures:
            buff.write(" * " + tc_result[0]._testMethodName + "\n")

    print(buff.getvalue())
def toXML(self): x = StringIO() x.write(L(1, "<os>")) x.write(L(2, "<type>%s</type>" % self.type)) if self.kernel: x.write(L(2, "<kernel>%s</kernel>" % self.kernel)) if self.initrd: x.write(L(2, "<initrd>%s</initrd>" % self.initrd)) if self.cmdline: x.write(L(2, "<cmdline>%s</cmdline>" % self.cmdline)) x.write(L(1, "</os>")) content = x.getvalue() x.close() return content
class _fileobject(object): """Faux file object attached to a socket object.""" default_bufsize = 8192 name = "<socket>" __slots__ = [ "mode", "bufsize", "softspace", # "closed" is a property, see below "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_close" ] def __init__(self, sock, mode='rb', bufsize=-1, close=False): self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: bufsize = self.default_bufsize self.bufsize = bufsize self.softspace = False # _rbufsize is the suggested recv buffer size. It is *strictly* # obeyed within readline() for recv calls. If it is larger than # default_bufsize it will be used for recv calls within read(). if bufsize == 0: self._rbufsize = 1 elif bufsize == 1: self._rbufsize = self.default_bufsize else: self._rbufsize = bufsize self._wbufsize = bufsize # We use StringIO for the read buffer to avoid holding a list # of variously sized string objects which have been known to # fragment the heap due to how they are malloc()ed and often # realloc()ed down much smaller than their original allocation. self._rbuf = StringIO() self._wbuf = [] # A list of strings self._close = close def _getclosed(self): return self._sock is None closed = property(_getclosed, doc="True if the file is closed") def close(self): try: if self._sock: self.flush() finally: if self._close: self._sock.close() self._sock = None def __del__(self): try: self.close() except: # close() may fail if __init__ didn't complete pass def flush(self): if self._wbuf: buffer = "".join(self._wbuf) self._wbuf = [] self._sock.sendall(buffer) def fileno(self): return self._sock.fileno() def write(self, data): data = str(data) # XXX Should really reject non-string non-buffers if not data: return self._wbuf.append(data) if (self._wbufsize == 0 or self._wbufsize == 1 and '\n' in data or self._get_wbuf_len() >= self._wbufsize): self.flush() def writelines(self, list): # XXX We could do better here for very long lists # XXX Should really reject non-string non-buffers self._wbuf.extend(filter(None, map(str, list))) if (self._wbufsize <= 1 or self._get_wbuf_len() >= self._wbufsize): self.flush() def _get_wbuf_len(self): buf_len = 0 for x in self._wbuf: buf_len += len(x) return buf_len def read(self, size=-1): # Use max, disallow tiny reads in a loop as they are very inefficient. # We never leave read() with any leftover data from a new recv() call # in our internal buffer. rbufsize = max(self._rbufsize, self.default_bufsize) # Our use of StringIO rather than lists of string objects returned by # recv() minimizes memory usage and fragmentation that occurs when # rbufsize is large compared to the typical return value of recv(). buf = self._rbuf buf.seek(0, 2) # seek end if size < 0: # Read until EOF self._rbuf = StringIO() # reset _rbuf. we consume it via buf. while True: data = self._sock.recv(rbufsize) if not data: break buf.write(data) return buf.getvalue() else: # Read until size bytes or EOF seen, whichever comes first buf_len = buf.tell() if buf_len >= size: # Already have size bytes in our buffer? Extract and return. buf.seek(0) rv = buf.read(size) self._rbuf = StringIO() self._rbuf.write(buf.read()) return rv self._rbuf = StringIO() # reset _rbuf. we consume it via buf. while True: left = size - buf_len # recv() will malloc the amount of memory given as its # parameter even though it often returns much less data # than that. The returned data string is short lived # as we copy it into a StringIO and free it. This avoids # fragmentation issues on many platforms. 
data = self._sock.recv(left) if not data: break n = len(data) if n == size and not buf_len: # Shortcut. Avoid buffer data copies when: # - We have no data in our buffer. # AND # - Our call to recv returned exactly the # number of bytes we were asked to read. return data if n == left: buf.write(data) del data # explicit free break assert n <= left, "recv(%d) returned %d bytes" % (left, n) buf.write(data) buf_len += n del data # explicit free #assert buf_len == buf.tell() return buf.getvalue() def readline(self, size=-1): buf = self._rbuf buf.seek(0, 2) # seek end if buf.tell() > 0: # check if we already have it in our buffer buf.seek(0) bline = buf.readline(size) if bline.endswith('\n') or len(bline) == size: self._rbuf = StringIO() self._rbuf.write(buf.read()) return bline del bline if size < 0: # Read until \n or EOF, whichever comes first if self._rbufsize <= 1: # Speed up unbuffered case buf.seek(0) buffers = [buf.read()] self._rbuf = StringIO() # reset _rbuf. we consume it via buf. data = None recv = self._sock.recv while data != "\n": data = recv(1) if not data: break buffers.append(data) return "".join(buffers) buf.seek(0, 2) # seek end self._rbuf = StringIO() # reset _rbuf. we consume it via buf. while True: data = self._sock.recv(self._rbufsize) if not data: break nl = data.find('\n') if nl >= 0: nl += 1 buf.write(data[:nl]) self._rbuf.write(data[nl:]) del data break buf.write(data) return buf.getvalue() else: # Read until size bytes or \n or EOF seen, whichever comes first buf.seek(0, 2) # seek end buf_len = buf.tell() if buf_len >= size: buf.seek(0) rv = buf.read(size) self._rbuf = StringIO() self._rbuf.write(buf.read()) return rv self._rbuf = StringIO() # reset _rbuf. we consume it via buf. while True: data = self._sock.recv(self._rbufsize) if not data: break left = size - buf_len # did we just receive a newline? nl = data.find('\n', 0, left) if nl >= 0: nl += 1 # save the excess data to _rbuf self._rbuf.write(data[nl:]) if buf_len: buf.write(data[:nl]) break else: # Shortcut. Avoid data copy through buf when returning # a substring of our first recv(). return data[:nl] n = len(data) if n == size and not buf_len: # Shortcut. Avoid data copy through buf when # returning exactly all of our first recv(). return data if n >= left: buf.write(data[:left]) self._rbuf.write(data[left:]) break buf.write(data) buf_len += n #assert buf_len == buf.tell() return buf.getvalue() def readlines(self, sizehint=0): total = 0 list = [] while True: line = self.readline() if not line: break list.append(line) total += len(line) if sizehint and total >= sizehint: break return list # Iterator protocols def __iter__(self): return self def next(self): line = self.readline() if not line: raise StopIteration return line
def print_unittest_summary(result):
    """
    Print summary of python unittest result to stdout

    @param result: test results
    """
    buff = StringIO()
    buff.write("-------------------------------------------------------------\nSUMMARY:\n")
    if result.wasSuccessful():
        buff.write("\033[32m * RESULT: SUCCESS\033[0m\n")
    else:
        buff.write(" * RESULT: FAIL\n")
    buff.write(" * TESTS: %s\n" % result.testsRun)
    buff.write(" * ERRORS: %s [%s]\n" % (len(result.errors), ', '.join(_format_errors(result.errors))))
    buff.write(" * FAILURES: %s [%s]\n" % (len(result.failures), ', '.join(_format_errors(result.failures))))
    print(buff.getvalue())
class OutBuffer: """Helper class for constructing network packets.""" sizelimit = 65536 def __init__(self): self.buff = StringIO() def add(self, data): """ @type data: str @returns: self @raises OmapiSizeLimitError: """ self.buff.write(data) if self.buff.tell() > self.sizelimit: raise OmapiSizeLimitError() return self def add_net32int(self, integer): """ @type integer: int @param integer: a 32bit unsigned integer @returns: self @raises OmapiSizeLimitError: """ if integer < 0 or integer >= (1 << 32): raise ValueError("not a 32bit unsigned integer") return self.add(struct.pack("!L", integer)) def add_net16int(self, integer): """ @type integer: int @param integer: a 16bit unsigned integer @returns: self @raises OmapiSizeLimitError: """ if integer < 0 or integer >= (1 << 16): raise ValueError("not a 16bit unsigned integer") return self.add(struct.pack("!H", integer)) def add_net32string(self, string): """ @type string: str @param string: maximum length must fit in a 32bit integer @returns: self @raises OmapiSizeLimitError: """ if len(string) >= (1 << 32): raise ValueError("string too long") return self.add_net32int(len(string)).add(string) def add_net16string(self, string): """ @type string: str @param string: maximum length must fit in a 16bit integer @returns: self @raises OmapiSizeLimitError: """ if len(string) >= (1 << 16): raise ValueError("string too long") return self.add_net16int(len(string)).add(string) def add_bindict(self, items): """ >>> OutBuffer().add_bindict(dict(foo="bar")).getvalue() '\\x00\\x03foo\\x00\\x00\\x00\\x03bar\\x00\\x00' @type items: [(str, str)] or {str: str} @returns: self @raises OmapiSizeLimitError: """ if not isinstance(items, list): items = items.items() for key, value in items: self.add_net16string(key).add_net32string(value) return self.add("\x00\x00") # end marker def getvalue(self): """ @rtype: str """ return self.buff.getvalue() def consume(self, length): """ @type length: int @returns: self """ self.buff = StringIO(self.getvalue()[length:]) return self
def generate_record0(self): # MOBI header {{{ metadata = self.oeb.metadata bt = 0x002 if self.primary_index_record_idx is not None: if False and self.indexer.is_flat_periodical: # Disabled as setting this to 0x102 causes the Kindle to not # auto archive the issues bt = 0x102 elif self.indexer.is_periodical: # If you change this, remember to change the cdetype in the EXTH # header as well bt = 0x103 if self.indexer.is_flat_periodical else 0x101 from calibre.ebooks.mobi.writer8.exth import build_exth exth = build_exth(metadata, prefer_author_sort=self.opts.prefer_author_sort, is_periodical=self.is_periodical, share_not_sync=self.opts.share_not_sync, cover_offset=self.cover_offset, thumbnail_offset=self.thumbnail_offset, start_offset=self.serializer.start_offset, mobi_doctype=bt) first_image_record = None if self.resources: used_images = self.serializer.used_images first_image_record = len(self.records) self.resources.serialize(self.records, used_images) last_content_record = len(self.records) - 1 # FCIS/FLIS (Seems to serve no purpose) flis_number = len(self.records) self.records.append(FLIS) fcis_number = len(self.records) self.records.append(fcis(self.text_length)) # EOF record self.records.append(b'\xE9\x8E\x0D\x0A') record0 = StringIO() # The MOBI Header record0.write( pack( b'>HHIHHHH', self.compression, # compression type # compression type 0, # Unused self.text_length, # Text length self. last_text_record_idx, # Number of text records or last tr idx RECORD_SIZE, # Text record size 0, # Unused 0 # Unused )) # 0 - 15 (0x0 - 0xf) uid = random.randint(0, 0xffffffff) title = normalize(unicode_type(metadata.title[0])).encode('utf-8') # 0x0 - 0x3 record0.write(b'MOBI') # 0x4 - 0x7 : Length of header # 0x8 - 0x11 : MOBI type # type meaning # 0x002 MOBI book (chapter - chapter navigation) # 0x101 News - Hierarchical navigation with sections and articles # 0x102 News feed - Flat navigation # 0x103 News magazine - same as 0x101 # 0xC - 0xF : Text encoding (65001 is utf-8) # 0x10 - 0x13 : UID # 0x14 - 0x17 : Generator version record0.write(pack(b'>IIIII', 0xe8, bt, 65001, uid, 6)) # 0x18 - 0x1f : Unknown record0.write(b'\xff' * 8) # 0x20 - 0x23 : Secondary index record sir = 0xffffffff if (self.primary_index_record_idx is not None and self.indexer.secondary_record_offset is not None): sir = (self.primary_index_record_idx + self.indexer.secondary_record_offset) record0.write(pack(b'>I', sir)) # 0x24 - 0x3f : Unknown record0.write(b'\xff' * 28) # 0x40 - 0x43 : Offset of first non-text record record0.write(pack(b'>I', self.first_non_text_record_idx)) # 0x44 - 0x4b : title offset, title length record0.write(pack(b'>II', 0xe8 + 16 + len(exth), len(title))) # 0x4c - 0x4f : Language specifier record0.write(iana2mobi(str(metadata.language[0]))) # 0x50 - 0x57 : Input language and Output language record0.write(b'\0' * 8) # 0x58 - 0x5b : Format version # 0x5c - 0x5f : First image record number record0.write( pack( b'>II', 6, first_image_record if first_image_record else len(self.records))) # 0x60 - 0x63 : First HUFF/CDIC record number # 0x64 - 0x67 : Number of HUFF/CDIC records # 0x68 - 0x6b : First DATP record number # 0x6c - 0x6f : Number of DATP records record0.write(b'\0' * 16) # 0x70 - 0x73 : EXTH flags # Bit 6 (0b1000000) being set indicates the presence of an EXTH header # Bit 12 being set indicates the presence of embedded fonts # The purpose of the other bits is unknown exth_flags = 0b1010000 if self.is_periodical: exth_flags |= 0b1000 if self.resources.has_fonts: exth_flags |= 0b1000000000000 
record0.write(pack(b'>I', exth_flags)) # 0x74 - 0x93 : Unknown record0.write(b'\0' * 32) # 0x94 - 0x97 : DRM offset # 0x98 - 0x9b : DRM count # 0x9c - 0x9f : DRM size # 0xa0 - 0xa3 : DRM flags record0.write(pack(b'>IIII', 0xffffffff, 0xffffffff, 0, 0)) # 0xa4 - 0xaf : Unknown record0.write(b'\0' * 12) # 0xb0 - 0xb1 : First content record number # 0xb2 - 0xb3 : last content record number # (Includes Image, DATP, HUFF, DRM) record0.write(pack(b'>HH', 1, last_content_record)) # 0xb4 - 0xb7 : Unknown record0.write(b'\0\0\0\x01') # 0xb8 - 0xbb : FCIS record number record0.write(pack(b'>I', fcis_number)) # 0xbc - 0xbf : Unknown (FCIS record count?) record0.write(pack(b'>I', 1)) # 0xc0 - 0xc3 : FLIS record number record0.write(pack(b'>I', flis_number)) # 0xc4 - 0xc7 : Unknown (FLIS record count?) record0.write(pack(b'>I', 1)) # 0xc8 - 0xcf : Unknown record0.write(b'\0' * 8) # 0xd0 - 0xdf : Unknown record0.write(pack(b'>IIII', 0xffffffff, 0, 0xffffffff, 0xffffffff)) # 0xe0 - 0xe3 : Extra record data # Extra record data flags: # - 0b1 : <extra multibyte bytes><size> # - 0b10 : <TBS indexing description of this HTML record><size> # - 0b100: <uncrossable breaks><size> # Setting bit 2 (0x2) disables <guide><reference type="start"> functionality extra_data_flags = 0b1 # Has multibyte overlap bytes if self.primary_index_record_idx is not None: extra_data_flags |= 0b10 if WRITE_UNCROSSABLE_BREAKS: extra_data_flags |= 0b100 record0.write(pack(b'>I', extra_data_flags)) # 0xe4 - 0xe7 : Primary index record record0.write( pack( b'>I', 0xffffffff if self.primary_index_record_idx is None else self.primary_index_record_idx)) record0.write(exth) record0.write(title) record0 = record0.getvalue() # Add some buffer so that Amazon can add encryption information if this # MOBI is submitted for publication record0 += (b'\0' * (1024 * 8)) self.records[0] = align_block(record0)
def format_read_header(read_header):
    """Format a dictionary representation of an SFF read header as text.
    """
    out = StringIO()
    out.write('\n>%s\n' % read_header['Name'])

    timestamp, hashchar, region, location = decode_accession(read_header['Name'])
    out.write(' Run Prefix: R_%d_%02d_%02d_%02d_%02d_%02d_\n' % timestamp)
    out.write(' Region #: %d\n' % region)
    out.write(' XY Location: %04d_%04d\n' % location)
    out.write('\n')

    for key, fmt in zip(read_header_fields, read_header_formats):
        val = read_header[key]
        out.write(fmt % val)
    return out.getvalue()
def run_process_with_timeout(args, filename_in=None, filename_out=None, filename_err=None, cwd=None, timeout=CFG_MISCUTIL_DEFAULT_PROCESS_TIMEOUT, sudo=None): """Execute the specified process but within a certain timeout. @param args: the actuall process. This should be a list of string as in: ['/usr/bin/foo', '--bar', 'baz'] @type args: list of string @param filename_in: the path to a file that should be provided as standard input to the process. If None this will default to /dev/null @type filename_in: string @param filename_out: Desired filename for stdout output (optional; see below). @type filename_out: string @param filename_err: Desired filename for stderr output (optional; see below). @type filename_err: string @param cwd: the path from where to execute the process @type cwd: string @param timeout: the timeout in seconds after which to consider the process execution as failed. a Timeout exception will be raised @type timeout: int @param sudo: the optional name of the user under which to execute the process (by using sudo, without prompting for a password) @type sudo: string @return: Tuple (exit code, string containing stdout output buffer, string containing stderr output buffer). However, if either filename_out or filename_err are defined, then the output buffers are not passed back but rather written into filename_out/filename_err pathnames. This is useful for commands that produce big files, for which it is not practical to pass results back to the callers in a Python text buffer. Note that it is the client's responsibility to name these files in the proper fashion (e.g. to be unique) and to close these files after use. @rtype: (number, string, string) @raise Timeout: if the process does not terminate within the timeout """ stdout = stderr = None if filename_in is not None: stdin = open(filename_in) else: ## FIXME: should use NUL on Windows stdin = open('/dev/null', 'r') if filename_out: stdout = open(filename_out, 'w') if filename_err: stderr = open(filename_err, 'w') tmp_stdout = StringIO() tmp_stderr = StringIO() if sudo is not None: args = ['sudo', '-u', sudo, '-S'] + list(args) ## See: <http://stackoverflow.com/questions/3876886/timeout-a-subprocess> process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=cwd, preexec_fn=os.setpgrp) ## See: <http://stackoverflow.com/questions/375427/non-blocking-read-on-a-stream-in-python> fd = process.stdout.fileno() fl = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) fd = process.stderr.fileno() fl = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) fd_to_poll = [process.stdout, process.stderr] select_timeout = 0.5 t1 = time.time() try: while process.poll() is None: if time.time() - t1 >= timeout: if process.stdin is not None: process.stdin.close() time.sleep(1) if process.poll() is None: ## See: <http://stackoverflow.com/questions/3876886/timeout-a-subprocess> os.killpg(process.pid, signal.SIGTERM) time.sleep(1) if process.poll() is None: os.killpg(process.pid, signal.SIGKILL) try: os.waitpid(process.pid, 0) except OSError: pass raise Timeout() for fd in select.select(fd_to_poll, [], [], select_timeout)[0]: if fd == process.stdout: buf = process.stdout.read(65536) if stdout is None: tmp_stdout.write(buf) else: stdout.write(buf) elif fd == process.stderr: buf = process.stderr.read(65536) if stderr is None: tmp_stderr.write(buf) else: stderr.write(buf) else: raise OSError("fd %s is not a valid file descriptor" % fd) 
finally: while True: ## Let's just read what is remaining to read. for fd in select.select(fd_to_poll, [], [], select_timeout)[0]: if fd == process.stdout: buf = process.stdout.read(65536) tmp_stdout.write(buf) if stdout is not None: stdout.write(buf) elif fd == process.stderr: buf = process.stderr.read(65536) tmp_stderr.write(buf) if stderr is not None: stderr.write(buf) else: raise OSError("fd %s is not a valid file descriptor" % fd) else: break return process.poll(), tmp_stdout.getvalue(), tmp_stderr.getvalue()
def multipart_encode(self, vars):
    "Encode form data (vars dict)"
    boundary = mimetools.choose_boundary()
    buf = StringIO()
    for key, value in vars.items():
        if not isinstance(value, file):
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"' % key)
            buf.write('\r\n\r\n' + value + '\r\n')
        else:
            fd = value
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split('/')[-1]
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buf.write('Content-Type: %s\r\n' % contenttype)
            # buffer += 'Content-Length: %s\r\n' % file_size
            fd.seek(0)
            buf.write('\r\n' + fd.read() + '\r\n')
    buf.write('--' + boundary + '--\r\n\r\n')
    buf = buf.getvalue()
    return boundary, buf
def format_read_data(read_data, read_header):
    """Format a dictionary representation of an SFF read data as text.

    The read data is expected to be in native flowgram format.
    """
    out = StringIO()
    out.write('\n')

    out.write('Flowgram:')
    for x in read_data['flowgram_values']:
        out.write('\t%01.2f' % (x * 0.01))
    out.write('\n')

    out.write('Flow Indexes:')
    current_index = 0
    for i in read_data['flow_index_per_base']:
        current_index = current_index + i
        out.write('\t%d' % current_index)
    out.write('\n')

    out.write('Bases:\t')
    # Roche uses 1-based indexing
    left_idx = read_header['clip_qual_left'] - 1
    right_idx = read_header['clip_qual_right'] - 1
    for i, base in enumerate(read_data['Bases']):
        if (i < left_idx) or (i > right_idx):
            out.write(base.lower())
        else:
            out.write(base.upper())
    out.write('\n')

    out.write('Quality Scores:')
    for score in read_data['quality_scores']:
        out.write('\t%d' % score)
    out.write('\n')
    return out.getvalue()
def format_hex(data, indent="", width=16, out=None): if out == None: out = StringIO() #StringIO经常被用来作为字符串的缓存,应为StringIO有个好处,他的有些接口和文件操作是一致的,也就是说用同样的代码,可以同时当成文件操作或者StringIO操作 strout = True else: strout = False indent += "%08x: " ofs = 0 for block in blocks(data, width): #将一个字符串按照指定的长度分成多个字符串段 out.write(indent % ofs) out.write(' '.join(map(lambda x: x.encode('hex'), block))) if len(block) < width: out.write( ' ' * (width - len(block)) ) out.write(' ') out.write(''.join(censor(block))) out.write(os.linesep) ofs += len(block) if strout: return out.getvalue()
def do_build_apk_report(self, a): output = StringIO() a.get_files_types() output.write("[FILES] \n") for i in a.get_files(): try: output.write("\t%s %s %x\n" % ( i, a.files[i], a.files_crc32[i], )) except KeyError: output.write("\t%s %x\n" % ( i, a.files_crc32[i], )) output.write("\n[PERMISSIONS] \n") details_permissions = a.get_details_permissions() for i in details_permissions: output.write("\t%s %s\n" % ( i, details_permissions[i], )) output.write("\n[MAIN ACTIVITY]\n\t%s\n" % (a.get_main_activity(), )) output.write("\n[ACTIVITIES] \n") activities = a.get_activities() for i in activities: filters = a.get_intent_filters("activity", i) output.write("\t%s %s\n" % ( i, filters or "", )) output.write("\n[SERVICES] \n") services = a.get_services() for i in services: filters = a.get_intent_filters("service", i) output.write("\t%s %s\n" % ( i, filters or "", )) output.write("\n[RECEIVERS] \n") receivers = a.get_receivers() for i in receivers: filters = a.get_intent_filters("receiver", i) output.write("\t%s %s\n" % ( i, filters or "", )) output.write("\n[PROVIDERS]\n\t%s\n\n" % (a.get_providers(), )) vm = dvm.DalvikVMFormat(a.get_dex()) vmx = analysis.uVMAnalysis(vm) output.write("Native code : %s\n" % (analysis.is_native_code(vmx), )) output.write("Dynamic code : %s\n" % (analysis.is_dyn_code(vmx), )) output.write("Reflection code : %s\n" % (analysis.is_reflection_code(vmx), )) output.write("ASCII Obfuscation: %s\n\n" % (analysis.is_ascii_obfuscation(vm), )) for i in vmx.get_methods(): i.create_tags() if not i.tags.empty(): output.write("%s %s %s\n" % ( i.method.get_class_name(), i.method.get_name(), i.tags, )) return output
def list_directory(self, path): """Helper to produce a directory listing (absent index.html). Return value is either a file object, or None (indicating an error). In either case, the headers are sent, making the interface the same as for send_head(). """ try: list = os.listdir(path) except os.error: self.send_error(404, "No permission to list directory") return None list.sort(key=lambda a: a.lower()) f = StringIO() displaypath = cgi.escape(urllib.unquote(self.path)) f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">') f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath) f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath) f.write("<hr>\n") f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">") f.write("<input name=\"file\" type=\"file\"/>") f.write("<input type=\"submit\" value=\"upload\"/>") f.write("              ") f.write("<input type=\"button\" value=\"HomePage\" onClick=\"location='/'\">") f.write("</form>\n") f.write("<hr>\n") f.write("<table>\n") for name in list: fullname = os.path.join(path, name) colorName = displayname = linkname = name # Append / for directories or @ for symbolic links if os.path.isdir(fullname): colorName = '<span style="background-color: #CEFFCE;">' + name + '/</span>' displayname = name linkname = name + "/" if os.path.islink(fullname): colorName = '<span style="background-color: #FFBFFF;">' + name + '@</span>' displayname = name # Note: a link to a directory displays with @ and links with / filename = os.getcwd() + '/' + displaypath + displayname f.write('<tr><td width="60%%"><a href="%s">%s</a></td><td width="20%%">%s</td><td width="20%%">%s</td></tr>\n' % (urllib.quote(linkname), colorName, sizeof_fmt(os.path.getsize(filename)), modification_date(filename))) f.write("</table>\n<hr>\n</body>\n</html>\n") length = f.tell() f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html") self.send_header("Content-Length", str(length)) self.end_headers() return f
class BaseWS: "Basic infrastructure for AFIP web service interfaces" def __init__(self, reintentos=1): self.reintentos = reintentos self.xml = self.client = self.Log = None self.params_in = {} self.inicializar() self.Token = self.Sign = "" self.LanzarExcepciones = True def inicializar(self): self.Excepcion = self.Traceback = "" self.XmlRequest = self.XmlResponse = "" def Conectar(self, cache=None, wsdl=None, proxy="", wrapper=None, cacert=None, timeout=30, soap_server=None): "Connect the SOAP client for this web service" try: # analyze transport and proxy server: if wrapper: Http = set_http_wrapper(wrapper) self.Version = self.Version + " " + Http._wrapper_version if isinstance(proxy, dict): proxy_dict = proxy else: proxy_dict = parse_proxy(proxy) if self.HOMO or not wsdl: wsdl = self.WSDL # append the suffix to download the service description (?WSDL or ?wsdl) if not wsdl.endswith(self.WSDL[-5:]) and wsdl.startswith("http"): wsdl += self.WSDL[-5:] if not cache or self.HOMO: # use 'cache' from installation base directory cache = os.path.join(self.InstallDir, 'cache') self.log("Conectando a wsdl=%s cache=%s proxy=%s" % (wsdl, cache, proxy_dict)) # determine the namespace (axis vs .net): ns = 'ser' if self.WSDL[-5:] == "?wsdl" else None self.client = SoapClient(wsdl=wsdl, cache=cache, proxy=proxy_dict, cacert=cacert, timeout=timeout, ns=ns, soap_server=soap_server, trace="--trace" in sys.argv) self.cache = cache # used by WSLPG and WSAA (access ticket) self.wsdl = wsdl # used by TrazaMed (to fix the location) return True except: ex = traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback) self.Traceback = ''.join(ex) try: self.Excepcion = traceback.format_exception_only(sys.exc_type, sys.exc_value)[0] except: self.Excepcion = u"<no disponible>" if self.LanzarExcepciones: raise return False def log(self, msg): "Write a message to the debug log (internal method)" if not isinstance(msg, unicode): msg = unicode(msg, 'utf8', 'ignore') if not self.Log: self.Log = StringIO() self.Log.write(msg) self.Log.write('\n\r') def DebugLog(self): "Return and clear the debug log" if self.Log: msg = self.Log.getvalue() # clear the log self.Log.close() self.Log = None else: msg = u'' return msg def LoadTestXML(self, xml): "Load a test file with the simulated response (debugging)" # if the parameter is a filename, load its contents: if os.path.exists(xml): xml = open(xml).read() class DummyHTTP: def __init__(self, xml_response): self.xml_response = xml_response def request(self, location, method, body, headers): return {}, self.xml_response self.client.http = DummyHTTP(xml) @property def xml_request(self): return self.XmlRequest @property def xml_response(self): return self.XmlResponse def AnalizarXml(self, xml=""): "Parse an XML message (by default the access ticket)" try: if not xml or xml == 'XmlResponse': xml = self.XmlResponse elif xml == 'XmlRequest': xml = self.XmlRequest self.xml = SimpleXMLElement(xml) return True except Exception, e: self.Excepcion = traceback.format_exception_only(sys.exc_type, sys.exc_value)[0] return False
def templatize(src, origin=None): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. """ from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK) out = StringIO() intrans = False inplural = False singular = [] plural = [] incomment = False comment = [] for t in Lexer(src, origin).tokenize(): if incomment: if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment': content = u''.join(comment) translators_comment_start = None for lineno, line in enumerate(content.splitlines(True)): if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK): translators_comment_start = lineno for lineno, line in enumerate(content.splitlines(True)): if translators_comment_start is not None and lineno >= translators_comment_start: out.write(u' # %s' % line) else: out.write(u' #\n') incomment = False comment = [] else: comment.append(t.contents) elif intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: filemsg = '' if origin: filemsg = 'file %s, ' % origin raise SyntaxError( "Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno)) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: contents = t.contents.replace('%', '%%') if inplural: plural.append(contents) else: singular.append(contents) else: if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = g.strip('"') elif g[0] == "'": g = g.strip("'") out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) elif t.contents == 'comment': incomment = True else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':', 1)[1]) else: out.write(blankout(p, 'F')) elif t.token_type == TOKEN_COMMENT: out.write(' # %s' % t.contents) else: out.write(blankout(t.contents, 'X')) return out.getvalue()
def __repr__(self): tables = self.get_many_to_many_tables() tables.extend(self.get_tables_with_no_pks()) models = self.models s = StringIO() engine = self.config.engine if not isinstance(engine, basestring): engine = str(engine.url) s.write(constants.HEADER_DECL % engine) if 'postgres' in engine: s.write(constants.PG_IMPORT) self.used_table_names = [] self.used_model_names = [] self.table_model_dict = {} # Kamil Edit for table in tables: if table not in self.tables: continue table_name = self.find_new_name(table.name, self.used_table_names) self.used_table_names.append(table_name) s.write('%s = %s\n\n' % (table_name, self._table_repr(table))) for model in models: s.write(repr(model)) s.write("\n\n") if self.config.example or self.config.interactive: s.write(constants.EXAMPLE_DECL % (models[0].__name__, models[0].__name__)) if self.config.interactive: s.write(constants.INTERACTIVE % ([model.__name__ for model in models], models[0].__name__)) return s.getvalue()
def encode(self): buf = BytesIO() buf.write(multidim_array_t._get_packed_fingerprint()) self._encode_one(buf) return buf.getvalue()
def do_POST(self): """Serve a POST request.""" r, info = self.deal_post_data() #print r, info, "by: ", self.client_address f = StringIO() f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">') f.write("<html>\n<title>Upload Result Page</title>\n") f.write("<body>\n<h2>Upload Result Page</h2>\n") f.write("<hr>\n") if r: f.write("<strong>Success:</strong>") else: f.write("<strong>Failed:</strong>") f.write(info) f.write("<br><a href=\"%s\">back</a>" % self.headers['referer']) f.write("<hr><small>Powered By: bones7456, check new version at ") f.write("<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">") f.write("here</a>.</small></body>\n</html>\n") length = f.tell() f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html") self.send_header("Content-Length", str(length)) self.end_headers() if f: self.copyfile(f, self.wfile) f.close()
def toXML(self, level=0, indent=u'\t'): """Return an XML representation of this job. Arguments: level: The level of indentation indent: The indentation string """ xml = StringIO() indentation = u''.join(indent for x in range(0, level)) # Open tag xml.write(indentation) xml.write(u'<job id="%s"' % self.id) if self.namespace is not None: xml.write(u' namespace="%s"' % self.namespace) xml.write(u' name="%s"' % self.name) if self.version is not None: xml.write(u' version="%s"' % self.version) if self.dv_namespace is not None: xml.write(u' dv-namespace="%s"' % self.dv_namespace) if self.dv_name is not None: xml.write(u' dv-name="%s"' % self.dv_name) if self.dv_version is not None: xml.write(u' dv-version="%s"' % self.dv_version) if self.level is not None: xml.write(u' level="%s"' % self.level) if self.compound is not None: xml.write(u' compound="%s"' % self.compound) xml.write(u'>\n') # Arguments if len(self.arguments) > 0: xml.write(indentation) xml.write(indent) xml.write(u'<argument>') xml.write(u' '.join(unicode(x) for x in self.arguments)) xml.write(u'</argument>\n') # Profiles if len(self.profiles) > 0: for pro in self.profiles: xml.write(indentation) xml.write(indent) xml.write(u'%s\n' % pro.toXML()) # Stdin/out/err if self.stdin is not None: xml.write(indentation) xml.write(indent) xml.write(self.stdin.toStdioXML('stdin')) xml.write(u'\n') if self.stdout is not None: xml.write(indentation) xml.write(indent) xml.write(self.stdout.toStdioXML('stdout')) xml.write(u'\n') if self.stderr is not None: xml.write(indentation) xml.write(indent) xml.write(self.stderr.toStdioXML('stderr')) xml.write(u'\n') # Uses if len(self.uses) > 0: for use in self.uses: xml.write(indentation) xml.write(indent) xml.write(use.toXML()) xml.write(u'\n') # Close tag xml.write(indentation) xml.write(u'</job>') result = xml.getvalue() xml.close() return result
def list_directory(self, path): """Helper to produce a directory listing (absent index.html). Return value is either a file object, or None (indicating an error). In either case, the headers are sent, making the interface the same as for send_head(). """ try: list = os.listdir(path) except os.error: self.send_error(404, "No permission to list directory") return None list.sort(key=lambda a: a.lower()) f = StringIO() displaypath = cgi.escape(urllib.unquote(self.path)) f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">') f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath) f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath) f.write("<hr>\n") f.write("<form ENCTYPE=\"multipart/form-data\" method=\"post\">") f.write("<input name=\"file\" type=\"file\"/>") f.write("<input type=\"submit\" value=\"upload\"/></form>\n") f.write("<hr>\n<ul>\n") for name in list: fullname = os.path.join(path, name) displayname = linkname = name # Append / for directories or @ for symbolic links if os.path.isdir(fullname): displayname = name + "/" linkname = name + "/" if os.path.islink(fullname): displayname = name + "@" # Note: a link to a directory displays with @ and links with / f.write('<li><a href="%s">%s</a>\n' % (urllib.quote(linkname), cgi.escape(displayname))) f.write("</ul>\n<hr>\n</body>\n</html>\n") length = f.tell() f.seek(0) self.send_response(200) self.send_header("Content-type", "text/html") self.send_header("Content-Length", str(length)) self.end_headers() return f
def toXML(self): xml = StringIO() if self.link is None: link = self.file.getLink() else: link = self.link if self.optional is None: optional = self.file.getOptional() else: optional = self.optional if self.register is None: register = self.file.getRegister() else: register = self.register if self.transfer is None: transfer = self.file.getTransfer() else: transfer = self.transfer type = self.file.getType() temporaryHint = self.temporaryHint xml.write(u'<uses file="%s"' % self.file.getFilename()) if temporaryHint is not None: if isinstance(temporaryHint, bool): xml.write(u' temporaryHint="%s"' % unicode(temporaryHint).lower()) else: xml.write(u' temporaryHint="%s"' % temporaryHint) if link is not None: xml.write(u' link="%s"' % link) if optional is not None: if isinstance(optional, bool): xml.write(u' optional="%s"' % unicode(optional).lower()) else: xml.write(u' optional="%s"' % optional) if register is not None: if isinstance(register, bool): xml.write(u' register="%s"' % unicode(register).lower()) else: xml.write(u' register="%s"' % register) if transfer is not None: if isinstance(transfer, bool): xml.write(u' transfer="%s"' % unicode(transfer).lower()) else: xml.write(u' transfer="%s"' % transfer) if type is not None: xml.write(u' type="%s"' % type) xml.write(u'/>') result = xml.getvalue() xml.close() return result
def toXML(self, level=0, indent=u'\t'): xml = StringIO() indentation = ''.join([indent for x in range(0, level)]) xml.write(indentation) xml.write(u'<child ref="%s">\n' % self.child.getID()) for parent in self.parents: xml.write(indentation) xml.write(indent) xml.write(u'<parent ref="%s"/>\n' % parent.getID()) xml.write(indentation) xml.write(u'</child>') result = xml.getvalue() xml.close() return result