def _copy_chunks(self): init = self.info.get_chunk_name(0) # initial chunk name if self.info.get_count() > 1: with lopen(init, "rb+") as fpo: # first chunkfile for i in range(1, self.info.get_count()): # input file # seek to beginning of chunk, # to get rid of overlapping chunks fpo.seek(self.info.get_chunk_range(i - 1)[1] + 1) filename = "{0}.chunk{1:d}".format(self.path, i) buf = 32 << 10 with lopen(filename, mode='rb') as fpi: while True: # copy in chunks, consumes less memory data = fpi.read(buf) if not data: break fpo.write(data) if fpo.tell() < self.info.get_chunk_range(i)[1]: remove(init) self.info.remove() # there are probably invalid chunks raise Exception( "Downloaded content was smaller than expected") remove(filename) # remove chunk if self.name: filepath = os.path.join(os.path.dirname(self.path), self.name) self.set_path(filepath) shutil.move(init, self.path) self.info.remove() # remove info file
def get_handle(self):
    """
    Returns a Curl handle ready to use for perform/multiperform.

    Configures write callbacks, opens the chunk file (append mode when
    resuming, write mode otherwise) and sets the HTTP byte range for
    chunked downloads. Returns None when this chunk is already complete.
    """
    self.set_request_context(self.p.url, self.p.get, self.p.post,
                             self.p.referer, self.p.cookies)
    self.setopt(pycurl.WRITEFUNCTION, self.write_body)
    self.setopt(pycurl.HEADERFUNCTION, self.write_header)
    # close a previously opened file handle, if any (first call has none)
    try:
        self.fp.close()
    except AttributeError:
        pass
    # request all bytes, since some servers in russia seems to have a
    # defect arihmetic unit
    filename = self.p.info.get_chunk_name(self.id)
    if self.resume:
        self.fp = lopen(filename, mode='ab')
        self.arrived = self.fp.tell()
        if not self.arrived:
            # append-mode tell() can be 0 before the first write; fall
            # back to the on-disk size
            self.arrived = os.stat(filename).st_size
        if self.range:
            # do nothing if chunk already finished
            if self.arrived + self.range[0] >= self.range[1]:
                return None
            # as last chunk dont set end range, so we get everything
            # NOTE(review): local name 'range' shadows the builtin
            if self.id == len(self.p.info.chunks) - 1:
                range = '{0:d}-'.format(self.arrived + self.range[0])
            else:
                range = '{0:d}-{1:d}'.format(
                    self.arrived + self.range[0],
                    min(self.range[1] + 1, self.p.size - 1))
            self.log.debug("Chunked resume with range {0}".format(range))
            self.setopt(pycurl.RANGE, range)
        else:
            self.log.debug("Resume File from {0:d}".format(self.arrived))
            self.setopt(pycurl.RESUME_FROM, self.arrived)
    else:
        if self.range:
            if self.id == len(self.p.info.chunks) - 1:  # see above
                range = "{0:d}-".format(self.range[0])
            else:
                range = "{0:d}-{1:d}".format(
                    self.range[0], min(self.range[1] + 1, self.p.size - 1))
            self.log.debug("Chunked with range {0}".format(range))
            self.setopt(pycurl.RANGE, range)
        self.fp = lopen(filename, mode='wb')
    return self.c
def _check_version(self):
    """
    Get db version.

    Reads the schema version from VERSION_FILE. When the file is missing,
    it is created with the current DB_VERSION and None is returned.

    :return: stored version as int, or None when no version file existed
    """
    try:
        with lopen(self.VERSION_FILE, mode='rb') as fp:
            v = int(fp.read().strip())
    except IOError:
        # first run: seed the version file with the current schema version
        # NOTE(review): writes a str to a file opened 'wb' — assumes the
        # project's lopen handles text on py3; confirm
        with lopen(self.VERSION_FILE, mode='wb') as fp:
            fp.write(str(DB_VERSION))
        return None
    return v
def parse_attributes(self, filename, name, folder=""):
    """
    Parse attribute dict from plugin.

    Scans the plugin source with the class regexes (builtin, single and
    multi-line forms), literal-evaluates each matched value and collects
    them into a BaseAttributes mapping. Unknown attribute names are
    logged (except the legacy 'type' / 'author_name').

    :param filename: path of the plugin file to scan
    :param name: plugin name, used for log messages
    :param folder: plugin folder, used for log messages
    :return: BaseAttributes instance with the parsed values
    """
    with lopen(filename, mode='rb') as fp:
        content = fp.read()
    attrs = BaseAttributes()
    for m in self.RE_BUILTIN.findall(content) + self.RE_SINGLE.findall(
            content) + self.parse_multi_line(content):
        # replace gettext function and eval result
        # ('self._(' must be stripped before the shorter '_(' form)
        try:
            attrs[m[0]] = ast.literal_eval(m[-1].replace('self._(',
                                                         '(').replace(
                '_(', '('))
        except Exception as e:
            self.log_debug(folder, name,
                           "Error when parsing: {0}".format(m[-1]))
            self.log.debug(str(e))
        if not hasattr(Base, "__{0}__".format(m[0])):
            # TODO: remove type from all plugins, its not needed
            if m[0] != "type" and m[0] != "author_name":
                self.log_debug(folder, name,
                               "Unknown attribute '{0}'".format(m[0]))
    return attrs
def test_plugin(self, name, url, status):
    """
    Run one hoster plugin against *url* and verify the expected *status*.

    Downloads the file, checks that its name is known and that its md5
    hash matches the recorded one. A Fail exception is tolerated when the
    expected status is 'failed' (or 'offline' with message 'offline');
    anything else is re-raised.

    :param name: plugin name
    :param url: url to download
    :param status: expected outcome ('offline', 'failed', ...)
    """
    # Print to stdout to see whats going on
    print('{0}: {1}, {2}'.format(name, url, status))
    log(DEBUG, '{0}: {1}, {2}'.format(name, url, status))
    # url and plugin should be only important thing
    file = File(self.pyload, -1, url, url, 0, 0, 0, 0, url, name, '',
                0, 0, 0, 0)
    file.init_plugin()
    self.thread.file = file
    self.thread.plugin = file.plugin
    try:
        a = time.time()
        file.plugin.preprocessing(self.thread)
        # BUG FIX: time.time() - a is a float; the 'd' presentation type
        # only accepts integers and raised ValueError here
        log(DEBUG, 'downloading took {0:.2f}s'.format(time.time() - a))
        log(DEBUG, 'size {0:d} KiB'.format(file.size >> 10))
        if status == 'offline':
            raise Exception('No offline Exception raised')
        if file.name not in self.files:
            raise Exception('Filename {0} not recognized'.format(
                file.name))
        hash = md5()
        path = os.path.join(DL_DIR, file.name)
        if not os.path.exists(path):
            raise Exception('File {0} does not exists'.format(file.name))
        # hash the download in 4 KiB chunks to keep memory flat
        with lopen(path, mode='rb') as fp:
            while True:
                buf = fp.read(4096)
                if not buf:
                    break
                hash.update(buf)
        if hash.hexdigest() != self.files[file.name]:
            log(DEBUG, 'Hash is {0}'.format(hash.hexdigest()))
            size = os.stat(fp.name).st_size
            if size < 10 << 20:  # 10MB
                # Copy for debug report
                log(DEBUG, 'Downloaded file copied to report')
                shutil.move(fp.name, os.path.join(plugin, fp.name))
            raise Exception('Hash does not match')
    except Exception as e:
        if isinstance(e, Fail) and status == 'failed':
            pass
        elif isinstance(
                e, Fail) and status == 'offline' and str(e) == 'offline':
            pass
        else:
            raise
def test_plugin(self, name, url, status):
    """
    Run one hoster plugin against *url* and verify the expected *status*.

    Downloads the file, checks that its name is known and that its md5
    hash matches the recorded one. A Fail exception is tolerated when the
    expected status is 'failed' (or 'offline' with message 'offline');
    anything else is re-raised.

    :param name: plugin name
    :param url: url to download
    :param status: expected outcome ('offline', 'failed', ...)
    """
    # Print to stdout to see whats going on
    print('{0}: {1}, {2}'.format(name, url, status))
    log(DEBUG, '{0}: {1}, {2}'.format(name, url, status))
    # url and plugin should be only important thing
    file = File(self.pyload, -1, url, url, 0, 0, 0, 0, url, name, '',
                0, 0, 0, 0)
    file.init_plugin()
    self.thread.file = file
    self.thread.plugin = file.plugin
    try:
        a = time.time()
        file.plugin.preprocessing(self.thread)
        # BUG FIX: time.time() - a is a float; the 'd' presentation type
        # only accepts integers and raised ValueError here
        log(DEBUG, 'downloading took {0:.2f}s'.format(time.time() - a))
        log(DEBUG, 'size {0:d} KiB'.format(file.size >> 10))
        if status == 'offline':
            raise Exception('No offline Exception raised')
        if file.name not in self.files:
            raise Exception(
                'Filename {0} not recognized'.format(file.name))
        hash = md5()
        path = os.path.join(DL_DIR, file.name)
        if not os.path.exists(path):
            raise Exception('File {0} does not exists'.format(file.name))
        # hash the download in 4 KiB chunks to keep memory flat
        with lopen(path, mode='rb') as fp:
            while True:
                buf = fp.read(4096)
                if not buf:
                    break
                hash.update(buf)
        if hash.hexdigest() != self.files[file.name]:
            log(DEBUG, 'Hash is {0}'.format(hash.hexdigest()))
            size = os.stat(fp.name).st_size
            if size < 10 << 20:  # 10MB
                # Copy for debug report
                log(DEBUG, 'Downloaded file copied to report')
                shutil.move(fp.name, os.path.join(plugin, fp.name))
            raise Exception('Hash does not match')
    except Exception as e:
        if isinstance(e, Fail) and status == 'failed':
            pass
        elif isinstance(e, Fail) and status == 'offline' and \
                str(e) == 'offline':
            pass
        else:
            raise
def load(self, url, get={}, post={}, ref=True, cookies=True,
         just_header=False, decode=False):
    """
    Load content at url and returns it

    :param url: url as string
    :param get: GET as dict
    :param post: POST as dict, list or string
    :param ref: Set HTTP_REFERER header
    :param cookies: use saved cookies
    :param just_header: if True only the header will be retrieved and
        returned as dict
    :param decode: Whether to decode the output according to http header,
        should be True in most cases
    :return: Loaded content
    """
    if not hasattr(self, "req"):
        raise Exception("Plugin type does not have Request attribute")
    self.check_abort()

    res = self.req.load(url, get, post, ref, cookies, just_header,
                        decode=decode)

    if self.pyload_core.debug:
        # in debug mode, dump every response to the plugin cache dir,
        # named after the calling function and line for traceability
        from inspect import currentframe
        frame = currentframe()
        dumpdir = os.path.join(self.pyload_core.cachedir, 'plugins',
                               self.__name__)
        makedirs(dumpdir, exist_ok=True)
        filepath = os.path.join(dumpdir, "dump_{0}_line{1}.html".format(
            frame.f_back.f_code.co_name, frame.f_back.f_lineno))
        with lopen(filepath, mode='wb') as fp:
            fp.write(res)
        del frame  # delete the frame or it wont be cleaned

    if just_header:
        # parse header
        header = {'code': self.req.code}
        for line in res.splitlines():
            line = line.strip()
            if not line or ":" not in line:
                continue
            key, none, value = line.partition(":")
            key = key.lower().strip()
            value = value.strip()
            if key in header:
                # repeated header fields are collected into a list
                if isinstance(header[key], list):
                    header[key].append(value)
                else:
                    header[key] = [header[key], value]
            else:
                header[key] = value
        res = header

    return res
def decrypt_captcha(self, url, get={}, post={}, cookies=False,
                    forceuser=False, imgtype='jpg',
                    result_type='textual'):
    """
    Download a captcha image and submit it to the CaptchaTrader service.

    Credentials are read from ~/ct.conf (username on the first line,
    password on the second). Stores the task id on self.c_task.

    :param url: url of the captcha image
    :return: decoded captcha text from the service response
    """
    img = self.load(url, get=get, post=post, cookies=cookies)
    # derive a pseudo-unique id from the fractional timestamp
    id = '{0:.2f}'.format(time.time())[-6:].replace('.', '')
    with lopen(os.path.join('tmp_captcha_{0}_{1}.{2}'.format(
            self.__name__, id, imgtype)), mode='wb') as fp:
        fp.write(img)
    log(DEBUG, 'Using ct for captcha')
    # put username and passkey into two lines in ct.conf
    conf = os.path.join(os.path.expanduser('~'), 'ct.conf')
    if not os.path.exists(conf):
        raise Exception('CaptchaService config {0} not found'.format(conf))
    # NOTE(review): 'fp' is rebound here from the captcha file to the
    # config file; the (FORM_FILE, fp.name) upload therefore points at
    # ct.conf's path — confirm this is intended upstream
    with lopen(conf, mode='rb') as fp:
        with get_request() as req:
            # TODO: Check get_request
            # raise timeout threshold
            req.c.setopt(LOW_SPEED_TIME, 300)
            json = req.load('http://captchatrader.com/api/submit', post={
                'api_key': '9f65e7f381c3af2b076ea680ae96b0b7',
                'username': fp.readline().strip(),
                'password': fp.readline().strip(),
                'value': (FORM_FILE, fp.name),
                'type': 'file'
            }, multipart=True)
    response = loads(json)
    log(DEBUG, str(response))
    result = response[1]
    self.c_task = response[0]  # remember task id for later feedback
    return result
def write(self, buf):
    """Append *buf* to the response buffer, enforcing size/abort limits."""
    oversized = self.rep.tell() > 1000000
    if not oversized and not self.__abort:
        # normal path: just accumulate the data
        self.rep.write(buf)
        return
    # limit exceeded or abort requested: flush what we have first
    rep = self.get_response()
    if self.__abort:
        raise Abort
    # keep the oversized response on disk for inspection
    with lopen("response.dump", mode='wb') as fp:
        fp.write(rep)
    raise Exception("Loaded Url exceeded limit")
def upload_container(self, filename, data):
    """
    Uploads and adds a container file to pyLoad.

    :param filename: name of the file
    :param data: file content
    """
    storage = self.pyload_core.config.get('general', 'storage_folder')
    target = os.path.join(storage, 'tmp_{0}'.format(filename))
    # persist the container under a temp name inside the storage folder
    with lopen(target, mode='wb') as fp:
        fp.write(str(data))
    # register the freshly written file as a new package
    return self.add_package(fp.name, [fp.name])
def addcrypted():
    """Store an uploaded DLC container and queue it as a paused package."""
    package = request.forms.get('referer', 'ClickAndLoad Package')
    # form encoding turned '+' into spaces; restore the base64 payload
    dlc = request.forms['crypted'].replace(" ", "+")
    dlc_path = os.path.join(DL_ROOT, purge.name(package) + ".dlc")
    with lopen(dlc_path, mode='wb') as fp:
        fp.write(dlc)
    try:
        API.add_package(package, [dlc_path], paused=True)
    except Exception:
        return HTTPError()
    return "success\n"
def check_container(self, filename, data):
    """
    Checks online status of urls and a submitted container file

    :param filename: name of the file
    :param data: file content
    :return: :class:`OnlineCheck`
    """
    folder = self.__pyload.config.get('general', 'storage_folder')
    # write the container under a temp name so the checker can read it
    path = os.path.join(folder, 'tmp_{0}'.format(filename))
    with lopen(path, mode='wb') as fp:
        fp.write(str(data))
    return self.check_links([fp.name])
def decrypt_captcha(self, url, get={}, post={}, cookies=False,
                    forceuser=False, imgtype='jpg',
                    result_type='textual'):
    """
    Download a captcha image and submit it to the CaptchaTrader service.

    Credentials are read from ~/ct.conf (username on the first line,
    password on the second). Stores the task id on self.c_task.

    :param url: url of the captcha image
    :return: decoded captcha text from the service response
    """
    img = self.load(url, get=get, post=post, cookies=cookies)
    # derive a pseudo-unique id from the fractional timestamp
    id = '{0:.2f}'.format(time.time())[-6:].replace('.', '')
    with lopen(os.path.join('tmp_captcha_{0}_{1}.{2}'.format(
            self.__name__, id, imgtype)), mode='wb') as fp:
        fp.write(img)
    log(DEBUG, 'Using ct for captcha')
    # put username and passkey into two lines in ct.conf
    conf = os.path.join(os.path.expanduser('~'), 'ct.conf')
    if not os.path.exists(conf):
        raise Exception('CaptchaService config {0} not found'.format(conf))
    # NOTE(review): 'fp' is rebound here from the captcha file to the
    # config file; the (FORM_FILE, fp.name) upload therefore points at
    # ct.conf's path — confirm this is intended upstream
    with lopen(conf, mode='rb') as fp:
        with get_request() as req:
            # TODO: Check get_request
            # raise timeout threshold
            req.c.setopt(LOW_SPEED_TIME, 300)
            json = req.load(
                'http://captchatrader.com/api/submit',
                post={'api_key': '9f65e7f381c3af2b076ea680ae96b0b7',
                      'username': fp.readline().strip(),
                      'password': fp.readline().strip(),
                      'value': (FORM_FILE, fp.name),
                      'type': 'file'},
                multipart=True)
    response = loads(json)
    log(DEBUG, str(response))
    result = response[1]
    self.c_task = response[0]  # remember task id for later feedback
    return result
def main():
    """Generate apitypes.js (AMD module) from the datatype enums."""
    print("generating apitypes.js")
    file = os.path.join(PACKDIR, 'app', 'scripts', 'utils', 'apitypes.js')
    with lopen(file, 'wb') as fp:
        # static module header
        fp.write("""// Autogenerated, do not edit!
/*jslint -W070: false*/
define([], function() {
\t'use strict';
\treturn {
""")
        for name in enums:
            enum = getattr(datatype, name)
            # collect the enum's value attributes (skip private/dunder)
            values = dict((attr, getattr(enum, attr)) for attr in dir(enum)
                          if not attr.startswith("_"))
            fp.write("\t\t{0}: {1},{2}".format(name, values, os.linesep))
        # close the returned object and the define() call
        fp.write("\t};{0}}});".format(os.linesep))
def check_download(self, rules, api_size=0, max_size=50000, delete=True,
                   read_size=0):
    """
    Checks the content of the last downloaded file, re match is saved to
    `last_check`

    :param rules: dict with names and rules to match
        (compiled regexp or strings)
    :param api_size: expected file size
    :param max_size: if the file is larger then it wont be checked
    :param delete: delete if matched
    :param read_size: amount of bytes to read from files larger
        then max_size
    :return: dictionary key of the first rule that matched
    """
    if not os.path.isfile(self.last_download):
        return None

    size = os.stat(self.last_download)
    size = size.st_size

    # skip the check when the file reached its expected size, or when it
    # is too large and no partial read was requested
    if api_size and api_size <= size:
        return None
    elif size > max_size and not read_size:
        return None
    self.__pyload.log.debug("Download Check triggered")

    with lopen(self.last_download, mode='rb') as fp:
        content = fp.read(read_size if read_size else -1)

    # produces encoding errors, better log to other file in the future?
    #self.__pyload.log.debug("Content: {0}".format(content))
    for name, rule in rules.items():
        if isinstance(rule, str):
            # plain substring rule
            if rule in content:
                if delete:
                    remove(self.last_download, trash=True)
                return name
        elif hasattr(rule, "search"):
            # compiled regex rule — keep the match object for callers
            m = rule.search(content)
            if m is not None:
                if delete:
                    remove(self.last_download, trash=True)
                self.last_check = m
                return name
def parse_config(path):
    """
    Parse a simple INI-like file into a dict of section -> list of lines.

    Blank lines and '#' comments are skipped; a content line outside any
    section raises an Exception.
    """
    result = {}
    section = None
    with lopen(path, mode='rb') as fp:
        for raw in bufread(fp, buffering=1):
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue  # comment or empty line
            if entry.startswith('['):
                # new section header: strip the brackets
                section = entry.replace('[', '').replace(']', '')
                result[section] = []
            elif section:
                result[section].append(entry)
            else:
                raise Exception('Line without section: {0}'.format(entry))
    return result
def test_cookies(self):
    """
    Verify cookies collected by a request are shared with a download
    sharing the same context, and persisted to the cookie file.
    """
    req = CurlRequest({})
    req.load(self.cookie_url)
    # at least one cookie must have been stored in the jar
    assert len(req.cj) > 0

    dl = CurlDownload(Bucket(), req)
    # download must reuse the request's (non-None) context
    assert req.context is dl.context is not None
    dl.download(self.cookie_url + "/cookies.php", "cookies.txt")

    with lopen("cookies.txt", mode='rb') as fp:
        cookies = fp.read().splitlines()

    self.assertEqual(len(cookies), len(dl.context))
    for c in cookies:
        # each dumped 'key:value' line must correspond to a jar entry
        k, v = c.strip().split(":")
        self.assertIn(k, req.cj)
def _alias_handlers(dispatcher, websock_handlers_map_file):
    """
    Set aliases specified in websock_handler_map_file in dispatcher.

    Args:
        dispatcher: dispatch.Dispatcher instance
        websock_handler_map_file: alias map file

    Each non-comment line holds '<alias> <resource>'; malformed lines are
    logged and skipped, dispatch errors are logged and processing
    continues.
    """
    with lopen(websock_handlers_map_file) as fp:
        for line in fp:
            # skip comments and blank lines
            if line[0] == '#' or line.isspace():
                continue
            # FIX: use a raw string so \S/\s are regex classes, not
            # invalid string escapes (DeprecationWarning since 3.6,
            # SyntaxError on newer interpreters)
            m = re.match(r'(\S+)\s+(\S+)', line)
            if m is None:
                logging.warning(
                    _("Wrong format in map file: {0}").format(line))
                continue
            try:
                dispatcher.add_resource_path_alias(
                    m.group(1), m.group(2))
            except dispatch.DispatchException as e:
                logging.error(str(e))
def get_log(self, offset=0):
    """
    Returns most recent log entries.

    :param offset: line offset
    :return: List of log entries
    """
    # TODO: Rewrite!
    folder = self.config.get('log', 'logfile_folder') \
        or self.pyload_core.DEFAULT_LOGDIRNAME
    name = self.config.get('log', 'logfile_name') \
        or self.DEFAULT_LOGFILENAME
    filepath = os.path.join(folder, name)
    try:
        with lopen(filepath) as fp:
            lines = fp.readlines()
    except Exception:
        # unreadable/missing log file — report instead of raising
        return ['No log available']
    # slicing past the end naturally yields an empty list
    return lines[offset:] if offset < len(lines) else []
def init(self):
    """
    Main loop, which executes commands.

    Opens the sqlite database, and when the stored schema version is
    older than DB_VERSION attempts a conversion; on failure the old db
    is backed up (.bak), the version file reset, and a fresh database
    created before the tables are (re)built.
    """
    version = self._check_version()

    self.conn = sqlite3.connect(self.DB_FILE)
    os.chmod(self.DB_FILE, 0o600)  # restrict db access to the owner
    self.c = self.conn.cursor()

    if version is not None and version < DB_VERSION:
        success = self._convert_db(version)
        # delete database
        if not success:
            self.c.close()
            self.conn.close()
            try:
                self.__pyload.log.warning(
                    self._("Database was deleted "
                           "due to incompatible version"))
            except Exception:
                # logger may not be available this early — fall back
                print("Database was deleted due to incompatible version")
            remove(self.VERSION_FILE)
            # keep the old data around as a backup
            shutil.move(self.DB_FILE, self.DB_FILE + ".bak")
            with lopen(self.VERSION_FILE, mode='wb') as fp:
                fp.write(str(DB_VERSION))
            # reopen a fresh database under the original name
            self.conn = sqlite3.connect(self.DB_FILE)
            os.chmod(self.DB_FILE, 0o600)
            self.c = self.conn.cursor()

    self._create_tables()
    self.conn.commit()
def run_tesser(self, subset=False, digits=True, lowercase=True,
               uppercase=True):
    """
    Run the tesseract OCR binary over self.image and store the result
    in self.result_captcha.

    :param subset: restrict recognition to a character whitelist built
        from the digits/lowercase/uppercase flags
    """
    # self.log.debug("create tmp tif")
    # tmp = tempfile.NamedTemporaryFile(suffix=".tif")
    # create empty placeholder files just to reserve stable names
    tmp_path = os.path.join("tmpTif_{0}.tif".format(self.__name__))
    tmp = lopen(tmp_path, mode='wb')
    tmp.close()

    # self.log.debug("create tmp txt")
    # tmp_txt = tempfile.NamedTemporaryFile(suffix=".txt")
    tmp_txt_path = os.path.join("tmp_txt_{0}.txt".format(self.__name__))
    tmp_txt = lopen(tmp_txt_path, mode='wb')
    tmp_txt.close()

    self.log.debug("save tiff")
    self.image.save(tmp.name, 'TIFF')

    if os.name == 'nt':
        # bundled tesseract binary on windows
        tessparams = [
            resource_filename(__package__, 'tesseract/tesseract.exe')
        ]
    else:
        tessparams = ['tesseract']
    # tesseract appends '.txt' itself, so pass the name without suffix
    tessparams.extend([tmp.name, tmp_txt.name.replace(".txt", "")])

    if subset and (digits or lowercase or uppercase):
        # self.log.debug("create temp subset config")
        # tmp_sub = tempfile.NamedTemporaryFile(suffix=".subset")
        with lopen(os.path.join("tmp_sub_{0}.subset".format(
                self.__name__)), mode='wb') as tmp_sub:
            tmp_sub.write("tessedit_char_whitelist ")
            if digits:
                tmp_sub.write("0123456789")
            if lowercase:
                tmp_sub.write("abcdefghijklmnopqrstuvwxyz")
            if uppercase:
                tmp_sub.write("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
            tmp_sub.write(os.linesep)
            # 'nobatch' plus the config file name select the whitelist
            tessparams.append("nobatch")
            tessparams.append(tmp_sub.name)

    self.log.debug("run tesseract")
    self.run(tessparams)
    self.log.debug("read txt")

    try:
        with lopen(tmp_txt.name) as fp:
            self.result_captcha = fp.read().replace(os.linesep, "")
    except Exception:
        # OCR produced no output file — treat as empty result
        self.result_captcha = ""

    self.log.debug(self.result_captcha)
    # best-effort cleanup of the temp files
    try:
        remove(tmp.name)
        remove(tmp_txt.name)
        if subset and (digits or lowercase or uppercase):
            remove(tmp_sub.name)
    except Exception:
        pass
except Exception as e: if isinstance(e, Fail) and flag == "fail": pass else: raise # setup methods c = Core() urls = [] flags = {} crypterlinks = os.path.join(os.path.dirname(__file__), "crypterlinks.txt") with lopen(crypterlinks) as fp: links = (line.strip() for line in bufread(fp, buffering=1)) for l in links: if not l or l.startswith("#"): continue if l.startswith("http"): if "||" in l: l, flag = l.split("||") flags[l] = flag urls.append(l) h, crypter = c.pgm.parse_urls(urls) plugins = accumulate(crypter) for plugin, urls in plugins.items(): def meta_class(plugin):
except Exception as e: if isinstance(e, Fail) and flag == 'fail': pass else: raise # setup methods c = Core() urls = [] flags = {} crypterlinks = os.path.join(os.path.dirname(__file__), 'crypterlinks.txt') with lopen(crypterlinks) as fp: links = (line.strip() for line in bufread(fp, buffering=1)) for l in links: if not l or l.startswith('#'): continue if l.startswith('http'): if '||' in l: l, flag = l.split('||') flags[l] = flag urls.append(l) h, crypter = c.pgm.parse_urls(urls) plugins = accumulate(crypter) for plugin, urls in plugins.items(): def meta_class(plugin):
def decrypt_captcha( self, url, get={}, post={}, cookies=True, forceuser=False, imgtype='jpg', result_type='textual'): """ Loads a captcha and decrypts it with ocr, plugin, user input :param url: url of captcha image :param get: get part for request :param post: post part for request :param cookies: True if cookies should be enabled :param forceuser: if True, ocr is not used :param imgtype: Type of the Image :param result_type: 'textual' if text is written on the captcha or 'positional' for captcha where the user have to click on a specific region on the captcha :return: result of decrypting """ img = self.load(url, get=get, post=post, cookies=cookies) id = "{0:.2f}".format(time.time())[-6:].replace(".", "") with lopen(os.path.join("tmp_captcha_{0}_{1}.{2}".format(self.__name__, id, imgtype)), mode='wb') as fp: fp.write(img) name = "{0}OCR".format(self.__name__) has_plugin = name in self.pyload_core.pgm.get_plugins("internal") if self.pyload_core.captcha: OCR = self.pyload_core.pgm.load_class("internal", name) else: OCR = None if OCR and not forceuser: time.sleep(random.randint(3000, 5000) // 1000) self.check_abort() ocr = OCR() result = ocr.get_captcha(fp.name) else: task = self.pyload_core.exm.create_captcha_task( img, imgtype, fp.name, self.__name__, result_type) self.task = task while task.is_waiting(): if self.abort(): self.pyload_core.exm.remove_task(task) raise Abort time.sleep(1) # TODO: task handling self.pyload_core.exm.remove_task(task) if task.error and has_plugin: # ignore default error message since the user could use OCR self.fail( self._( "Pil and tesseract not installed and no Client connected for captcha decrypting")) elif task.error: self.fail(task.error) elif not task.result: self.fail( self._( "No captcha result obtained in appropriate time")) result = task.result self.pyload_core.log.debug( "Received captcha result: {0}".format(result)) if not self.pyload_core.debug: try: remove(fp.name) except Exception: pass return result
def main():
    """
    Generate apitypes.py (plain python API types) and apitypes_debug.py
    (type specs used for validation) from the thrift-generated ttypes
    and the pyload.thrift source.
    """
    enums = []
    classes = []

    thrift_path = os.path.join(PACKDIR, "pyload.thrift")
    with lopen(thrift_path, mode='rb') as fp:
        tf = fp.read()

    print("generating apitypes.py")

    # split the thrift-generated types into enums and struct classes
    for name in dir(ttypes):
        klass = getattr(ttypes, name)
        if name in ("TBase", "TExceptionBase") or name.startswith(
                "_") or not (issubclass(klass, ttypes.TBase) or
                             issubclass(klass, ttypes.TExceptionBase)):
            continue
        if hasattr(klass, "thrift_spec"):
            classes.append(klass)
        else:
            enums.append(klass)

    apitypes_path = os.path.join(PACKDIR, "apitypes.py")
    apitypes_debug_path = os.path.join(PACKDIR, "apitypes_debug.py")
    with lopen(apitypes_path, mode='wb') as fp:
        with lopen(apitypes_debug_path, mode='wb') as dev:
            # NOTE(review): this template mixes the {0} consumed by
            # .format(__version__) with literal {0}/{1} in the generated
            # __str__ body — confirm the inner braces are escaped ({{ }})
            # in the real source, otherwise .format would raise
            fp.write("""# -*- coding: utf-8 -*-
# Autogenerated by pyload
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

class BaseObject(object):
\t__version__ = {0}
\t__slots__ = []

\tdef __str__(self):
\t\treturn "<{0} {1}>".format(self.__class__.__name__, ", ".join("{0}={1}".format(k, getattr(self, k)) for k in self.__slots__))

class ExceptionObject(Exception):
\t__version__ = {0}
\t__slots__ = []
""".format(__version__))
            dev.write("""# -*- coding: utf-8 -*-
# Autogenerated by pyload
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING{0}
from pyload.core.datatype import *{0}
""".format(os.linesep))

            dev.write("enums = [" + os.linesep)

            # generate enums
            for enum in enums:
                name = enum.__name__
                fp.write("class {0}:{1}".format(name, os.linesep))
                # sort attributes by their numeric enum value
                for attr in sorted(dir(enum), key=lambda x: getattr(enum,
                                                                    x)):
                    if attr.startswith("_") or attr in ("read", "write"):
                        continue
                    fp.write("\t{0} = {1}{2}".format(
                        attr, getattr(enum, attr), os.linesep))
                dev.write("\t\"{0}\",{1}".format(name, os.linesep))
                fp.write(os.linesep)

            dev.write("]" + os.linesep * 2)
            dev.write("classes = {" + os.linesep)

            for klass in classes:
                name = klass.__name__
                # exceptions inherit ExceptionObject, structs BaseObject
                base = "ExceptionObject" if issubclass(
                    klass, ttypes.TExceptionBase) else "BaseObject"
                fp.write("class {0}({1}):{2}".format(name, base,
                                                     os.linesep))

                # No attributes, do not write further info
                if not klass.__slots__:
                    fp.write("\tpass" + os.linesep)
                    continue

                fp.write("\t__slots__ = {0}{1}".format(klass.__slots__,
                                                       os.linesep))
                dev.write("\t'{0}' : [".format(name))

                # create init
                args = ['self'
                        ] + ["{0}=None".format(x) for x in klass.__slots__]
                specs = []
                fp.write("\tdef __init__({0}):{1}".format(
                    ", ".join(args), os.linesep))
                for i, attr in enumerate(klass.__slots__):
                    fp.write("\t\tself.{0} = {0}{1}".format(attr,
                                                            os.linesep))
                    spec = klass.thrift_spec[i + 1]
                    # assert correct order, so the list of types is enough for
                    # check
                    assert spec[2] == attr
                    # dirty way to check optional attribute, since it is not in the generated code
                    # can produce false positives, but these are not critical
                    optional = re.search(optional_re.format(i + 1, attr),
                                         tf, flags=re.I)
                    if optional is None:
                        specs.append(get_spec(spec))
                    else:
                        specs.append("(None, {0})".format(get_spec(spec)))

                fp.write(os.linesep)
                dev.write(", ".join(specs) + "]," + os.linesep)

            dev.write("}" + os.linesep)

            # emit the Iface stub class and the per-method result specs
            fp.write("class Iface(object):" + os.linesep)
            dev.write("methods = {" + os.linesep)

            for name in dir(Pyload.Iface):
                if name.startswith("_"):
                    continue

                func = inspect.getargspec(getattr(Pyload.Iface, name))

                fp.write("\tdef {0}({1}):{2}\t\tpass{2}".format(
                    name, ", ".join(func.args), os.linesep))

                spec = getattr(Pyload, "{0}_result".format(
                    name)).thrift_spec
                if not spec or not spec[0]:
                    dev.write("\t'{0}': None,{1}".format(name, os.linesep))
                else:
                    spec = spec[0]
                    dev.write("\t'{0}': {1},{2}".format(
                        name, get_spec(spec), os.linesep))

            fp.write(os.linesep)
            dev.write("}" + os.linesep)