def __handle_node(search_paths, tp, v):
    """Locate and parse the JSON template for entry type *tp*.

    Tries each directory in *search_paths* in order, preprocessing the
    file ``<path>/<tp>`` with ``pp`` and the variable map *v*.  The first
    readable file wins.  A ``login`` entry that cannot be found in any
    search path is synthesized with default values instead of failing.

    Parameters:
        search_paths: list of directory prefixes to probe, in order.
        tp: entry type path, e.g. ``'login/example.com'``; the first
            component is the entry type.
        v: variable map; ``v['tag']`` selects the entry tag for logins.

    Returns:
        dict: the parsed (or, for missing logins, synthesized) entry.

    Raises:
        Exception: if a found file contains invalid JSON, or if a
            non-login entry type exists in no search path.
    """
    for path in search_paths:
        f = path + '/' + tp
        try:
            data = pp(f, v)
        except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt fatal
            log.v("Tried " + f)
            continue
        log.v("Read " + f)
        try:
            jdata = json.loads(data)
        except Exception as e:
            raise Exception("Invalid JSON data in '" + f + "':\n" + str(e))
        jdata['type'] = tp.split('/')[0]
        if jdata['type'] == 'login':
            jdata['vars'] = v
            # An empty tag falls back to the default slot.
            jdata['tag'] = v['tag'] if len(v['tag']) != 0 else '__default__'
        return jdata
    if tp.split('/')[0] == 'login':
        # No template found anywhere: fabricate a minimal default login entry.
        log.w("Not found entry type '%s'" % tp)
        name = '/'.join(tp.split('/')[1:])
        return {
            'vars': v,
            'tag': '__default__',
            'type': 'login',
            'name': name,
            'address': 'http://' + name,
            'form': {},
        }
    else:
        raise Exception("Not found entry type '%s'" % tp)
def embed_html(path, defines, css_images_root):
    """Preprocess HTML and embed CSS images.

    Runs the file at *path* through the ``pp`` preprocessor, then replaces
    every ``url(<name>)`` reference in the result with a base64 ``data:``
    URI built from the file ``<css_images_root>/<name>``.

    Parameters:
        path: HTML file to preprocess (passed to ``pp`` with fatal=True).
        defines: preprocessor definitions forwarded to ``pp``.
        css_images_root: directory containing the referenced image files.

    Returns:
        str: the preprocessed HTML with images embedded inline.
    """
    import base64  # local import: replaces the Py2-only str.encode('base64')

    def __getimg(img_path):
        # NOTE(review): the MIME type is hard-coded to PNG regardless of
        # the actual image format — preserved from the original behavior.
        with open(img_path, "rb") as fh:  # close the handle (original leaked it)
            payload = base64.b64encode(fh.read()).decode("ascii")
        return "data:image/png;base64," + payload

    def __embed_css_images(text):
        # Fixed regex: the original r'url\(([^)]*)\)*' had a stray '*'
        # that made the closing paren optional/repeatable.
        urls_re = re.compile(r'url\(([^)]*)\)')
        for url in [m.group(1) for m in urls_re.finditer(text)]:
            text = text.replace(url, __getimg(css_images_root + "/" + url))
        return text

    data = pp(path, defines, fatal=True)
    return __embed_css_images(data)
def generate_bookmarklet(i, o, defines):
    """Preprocess the bookmarklet source *i* and write the result to *o*.

    Parameters:
        i: input file path, passed through the ``pp`` preprocessor.
        o: output file path; its parent directory is created as needed.
        defines: preprocessor definitions forwarded to ``pp``.
    """
    log.v("> bookmarklet/%s" % os.path.dirname(i))
    mkdir(os.path.dirname(o))
    # Context manager guarantees the handle is closed and the data
    # flushed (the original leaked the open file object).
    with open(o, "w") as fh:
        fh.write(pp(i, defines))
    log.v(">> Save to " + o)