def get(url):
    """Fetch *url* and decode the packed p,a,c,k(e,d) payload embedded in it."""
    print('connecting...')
    response = requests.get(url)
    # Arguments of the packer call: payload, radix, count, base64 wordlist.
    match = re.match(
        r'^.*\}\(\'(.*)\',(\d*),(\d*),\'([\w|\+|\/|=]*)\'.*$',
        response.text)
    wordlist = lz.decompressFromBase64(match.group(4)).split('|')
    return decode(match.group(1), int(match.group(2)), int(match.group(3)), wordlist)
def get_data(self, html: str):
    """Extract and decode the LZ-compressed article payload embedded in *html*."""
    found = re.search(self.data_query_re, html)
    if found is None:
        raise Exception("Couldn't extract article data")
    # Payload is base64 LZ-compressed, then percent-encoded JSON.
    raw = LZString().decompressFromBase64(found["data"])
    return json.loads(unquote(raw))
def get_bvsd(nid) -> str:
    """Build the 'bvsd' login-telemetry JSON blob for the auth request.

    Args:
        nid: the user id typed into the login form.

    Returns:
        JSON string holding a per-attempt UUID and the LZ-compressed,
        URI-safe encData payload.
    """
    # Bug fix: uuid.uuid4() returns a UUID object, so the str annotation was
    # wrong; stringify once so both interpolations below use the same text.
    bvsd_uuid: str = str(uuid.uuid4())
    # NOTE(review): USER_AGENTS is a module-level value — presumably a single
    # user-agent string, not a list; confirm.
    encData_json: str = (
        '{"a":"%s-4","b":"1.3.4","d":[{"i":"id","b":{"a":["0,%s"]},"d":"%s",'
        '"e":false,"f":false},{"i":"pw","e":true,"f":false}],"h":"1f","i":{"a":"%s"}}'
        % (bvsd_uuid, nid, nid, USER_AGENTS)
    )
    encData = LZString.compressToEncodedURIComponent(encData_json)
    bvsd: str = '{"uuid":"%s","encData":"%s"}' % (bvsd_uuid, encData)
    return bvsd
def parse_js(self, js):
    """Unpack an eval(function(p,a,c,k,e,d){...}) obfuscated script.

    The packed call ends in }('payload',radix,count,'base64-wordlist'...).
    Identifiers in the payload are replaced by words from the LZ-decompressed
    wordlist, keyed in base-62 order ('0'-'9','a'-'z','A'-'Z', then
    two-character keys).

    Returns the decoded text trimmed to its outermost {...} span.
    """
    # The argument list starts right after '}(' and runs to the closing ')'.
    param = js[js.find('}(') + 2:-1]
    # p1: the quoted packed payload.
    p1_end = param[1:].find('\'') + 1
    p1 = param[1:p1_end]
    param = param[p1_end + 2:]
    # p2: radix (kept only for parity with the JS unpacker's signature).
    p2_end = param.find(',')
    p2 = int(param[:p2_end])
    param = param[p2_end + 1:]
    # p3: word count.
    p3_end = param.find(',')
    p3 = int(param[:p3_end])
    param = param[p3_end + 2:]
    # p4: the base64-encoded, LZ-compressed wordlist.
    p4_end = param.find('\'')
    p4 = param[:p4_end]
    p5 = 0
    p6 = {}
    # Mirror the JS unpacker's (p, a, c, k, e, d) naming.
    (p, a, c, k, e, d) = (p1, p2, p3, p4, p5, p6)
    k = LZString.decompressFromBase64(k).split('|')
    lenk = len(k)
    left = lenk - 10 - 26  # NOTE(review): computed but never used
    # Single-character keys first, in base-62 digit order.
    key = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    index = 0
    for i in key:
        # An empty wordlist slot means the key stands for itself.
        d[i] = k[index] if k[index] else i
        index += 1
        if index == lenk:
            break
    if index != lenk:
        # Two-character keys: first char '1'-'9', second char base-62.
        for i in itertools.product(
            '123456789',
            '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        ):
            tmp = i[0] + i[1]
            d[tmp] = k[index] if k[index] else tmp
            index += 1
            if index == lenk:
                break
    print('d: ', d)  # NOTE(review): debug output left in
    # Scan the payload, replacing maximal alphanumeric runs via the mapping.
    ac_list = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    now_rep = ''
    res = ''
    for i in p:
        if i in ac_list:
            now_rep += i
        else:
            res += d[now_rep] if now_rep in d else now_rep
            now_rep = ''
            res += i
    # Flush a trailing identifier, if any.
    if now_rep:
        res += d[now_rep] if now_rep in d else now_rep
    # Keep only the outermost {...} block of the decoded script.
    res = res[res.find('{'):res.rfind('}') + 1]
    # print(res)
    return res
def _decode_master_js(s: str) -> dict:
    """Decode the hex-packed, LZ-compressed master-data blob embedded in *s*."""
    from lzstring import LZString
    hex_blob = re.search(
        r"convert_formated_hex_to_string\('(.*)'\)\);", s).group(1)
    # Each group of 4 hex digits encodes one UTF-16 code unit:
    # low byte first, then high byte.
    chars = []
    for pos in range(0, len(hex_blob) - 1, 4):
        low = int(hex_blob[pos:pos + 2], 16)
        high = int(hex_blob[pos + 2:pos + 4], 16)
        chars.append(chr(low | (high << 8)))
    compressed = ''.join(chars)
    del hex_blob  # free the large intermediate before decompressing
    return json.loads(LZString().decompress(compressed))
def handle_service_message(self, message, details=None):
    """Route an incoming service message into the data-frame store."""
    if not (details and details.topic):
        return
    msg = Message.parse(message)
    # The dotted topic's trailing segment carries the service uuid.
    service_uuid = details.topic.split('.')[-1]
    if msg.msgClass == MessageClass.SERVICE_DATA:
        self._store_data_frame(service_uuid, msg.payload)
    elif msg.msgClass == MessageClass.SERVICE_DATA_COMPRESSED:
        # Compressed payloads arrive LZ-packed as UTF-16 text.
        state = simplejson.loads(
            LZString().decompressFromUTF16(msg.payload))
        # NOTE(review): only this branch forwards msg.date — confirm intended.
        self._store_data_frame(service_uuid, state, msg.date)
def response(returndata):
    """Stream a DQX table query as tab-separated bytes.

    Yields a header row of ascii-sanitized column names, then one encoded
    row per record.
    """
    tableId = returndata['table']
    query = returndata['query']
    # The column list arrives LZ-compressed in URI-safe base64.
    columns = DQXDbTools.ParseColumnEncoding(
        LZString.decompressFromEncodedURIComponent(returndata['columns']))
    database = None
    orderBy = None
    if 'database' in returndata:
        database = returndata['database']
    if 'orderBy' in returndata:
        orderBy = json.loads(returndata['orderBy'])
    auth_query = DQXDbTools.CredentialInformation(returndata).get_auth_query(
        database, [tableId])
    # Auth is checked when this context is entered.
    with DQXDbTools.DBCursor(returndata, database, read_timeout=config.TIMEOUT) as cur:
        whc = DQXDbTools.WhereClause()
        whc.ParameterPlaceHolder = '%s'  # NOTE: MySQL PyODBC seems to require this nonstandard coding
        whc.Decode(query)
        if auth_query:
            # AND the caller's filter with the auth restriction.
            whc.query = {
                "whcClass": "compound",
                "isCompound": True,
                "isRoot": True,
                "Components": [whc.query, auth_query],
                "Tpe": "AND"
            }
        whc.CreateSelectStatement()
        sqlquery = "SELECT {0} FROM {1}".format(
            ','.join([DBCOLESC(x['Name']) for x in columns]), DBTBESC(tableId))
        if len(whc.querystring_params) > 0:
            sqlquery += " WHERE {0}".format(whc.querystring_params)
        if orderBy is not None:
            # orderBy is a list of (direction, column) pairs.
            sqlquery += " ORDER BY {0}".format(','.join([
                DBCOLESC(col) + ' ' + direction for direction, col in orderBy
            ]))
        print(sqlquery)  # NOTE(review): debug print — consider DQXUtils.LogServer
        cur.execute(sqlquery, whc.queryparams)
        # Header row: column names, ascii-sanitized.
        yield b'\t'.join(col[0].encode('ascii', 'replace') for col in cur.description) + b'\n'
        for row in cur.fetchall():
            line = b'\t'.join([str(x).encode('ascii', 'replace') for x in row]) + b'\n'
            yield line
        if DQXDbTools.LogRequests:
            DQXUtils.LogServer('###QRY:' + sqlquery)
            DQXUtils.LogServer('###PARAMS:' + str(whc.queryparams))
def main():
    """Entry point: configure and run the DeepSpeech transcription websocket server."""
    import argparse
    # Server state is shared with the websocket callbacks via module globals.
    global model, spect_parser, decoder, args, device, decompressor
    parser = argparse.ArgumentParser(
        description='DeepSpeech transcription server')
    parser.add_argument('--host', type=str,
                        default='0.0.0.0',
                        help='Host to be used by the server')
    parser.add_argument('--port', type=int,
                        default=8888,
                        help='Port to be used by the server')
    parser = add_inference_args(parser)
    parser = add_decoder_args(parser)
    args = parser.parse_args()
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info('Setting up server...')
    # Inference only — no gradients needed.
    torch.set_grad_enabled(False)
    device = torch.device("cuda" if args.cuda else "cpu")
    model = load_model(device, args.model_path, args.half)
    if args.decoder == "beam":
        from decoder import BeamCTCDecoder
        decoder = BeamCTCDecoder(model.labels,
                                 lm_path=args.lm_path,
                                 alpha=args.alpha,
                                 beta=args.beta,
                                 cutoff_top_n=args.cutoff_top_n,
                                 cutoff_prob=args.cutoff_prob,
                                 beam_width=args.beam_width,
                                 num_processes=args.lm_workers)
    else:
        # '_' is the CTC blank label.
        decoder = GreedyDecoder(model.labels,
                                blank_index=model.labels.index('_'))
    spect_parser = OnlineSpectrogramParser(model.audio_conf, normalize=True)
    logging.info('Server initialised')
    # Presumably used by message_received to decode LZ-compressed client
    # payloads — confirm against the callback implementations.
    decompressor = LZString()
    server = WebsocketServer(host=args.host, port=args.port)
    server.set_fn_new_client(new_client)
    server.set_fn_client_left(client_left)
    server.set_fn_message_received(message_received)
    server.run_forever()  # blocks until the process is stopped
def get_episodes(html, url):
    """Build the Episode list for a comic page, returned in reverse source order."""
    cid = re.search(r"comic/(\d+)", url).group(1)
    # Layout A (e.g. /comic/10924/): the list is present in the page itself.
    episodes = get_list(html, cid)
    if not episodes:
        # Layout B (e.g. /comic/4350/): the list is LZ-compressed inside
        # the __VIEWSTATE hidden field.
        view_state = re.search(
            r'id="__VIEWSTATE" value="([^"]+)', html).group(1)
        decompressed = LZString.decompressFromBase64(view_state)
        episodes = get_list(decompressed, cid)
    built = [Episode(item[0].strip(), urljoin(url, item[1])) for item in episodes]
    return built[::-1]
def response(returndata):
    """Stream a DQX table query as tab-separated bytes.

    Yields a header row of ascii-sanitized column names followed by one
    encoded row per record.
    """
    tableId = returndata['table']
    query = returndata['query']
    # The column list arrives LZ-compressed in URI-safe base64.
    columns = DQXDbTools.ParseColumnEncoding(LZString.decompressFromEncodedURIComponent(returndata['columns']))
    database = None
    orderBy = None
    if 'database' in returndata:
        database = returndata['database']
    if 'orderBy' in returndata:
        orderBy = json.loads(returndata['orderBy'])
    auth_query = DQXDbTools.CredentialInformation(returndata).get_auth_query(database, [tableId])
    # Auth is checked when this context is entered.
    with DQXDbTools.DBCursor(returndata, database, read_timeout=config.TIMEOUT) as cur:
        whc = DQXDbTools.WhereClause()
        whc.ParameterPlaceHolder = '%s'  # NOTE: MySQL PyODBC seems to require this nonstandard coding
        whc.Decode(query)
        if auth_query:
            # AND the caller's filter with the auth restriction.
            whc.query = {
                "whcClass": "compound",
                "isCompound": True,
                "isRoot": True,
                "Components": [whc.query, auth_query],
                "Tpe": "AND"
            }
        whc.CreateSelectStatement()
        sqlquery = "SELECT {0} FROM {1}".format(','.join([DBCOLESC(x['Name']) for x in columns]), DBTBESC(tableId))
        if len(whc.querystring_params) > 0:
            sqlquery += " WHERE {0}".format(whc.querystring_params)
        if orderBy is not None:
            # orderBy is a list of (direction, column) pairs.
            sqlquery += " ORDER BY {0}".format(','.join([DBCOLESC(col) + ' ' + direction for direction, col in orderBy]))
        cur.execute(sqlquery, whc.queryparams)
        # Header row: column names, ascii-sanitized.
        yield b'\t'.join(col[0].encode('ascii', 'replace') for col in cur.description) + b'\n'
        for row in cur.fetchall():
            line = b'\t'.join([str(x).encode('ascii', 'replace') for x in row]) + b'\n'
            yield line
        if DQXDbTools.LogRequests:
            DQXUtils.LogServer('###QRY:' + sqlquery)
            DQXUtils.LogServer('###PARAMS:' + str(whc.queryparams))
def masterdata() -> dict:
    """Download js/master.js and decode its embedded master-data dict.

    Returns:
        The JSON object packed inside convert_formated_hex_to_string(...).
    """
    # Bug fix: os.path.join is a filesystem join (backslashes on Windows);
    # URLs must always be joined with forward slashes.
    url = __COMMON_DIR.rstrip('/') + '/js/master.js'
    text = requests.getcache(url).text
    matched = re.search(r"convert_formated_hex_to_string\('(.*)'\)\);",
                        text).group(1)

    def convert_formated_hex_to_string(hex_str):
        # Each group of 4 hex digits encodes one UTF-16 code unit:
        # low byte first, then high byte.
        length = len(hex_str)
        res = (chr(
            int(hex_str[i:i + 2], 16) | (int(hex_str[i + 2:i + 4], 16) << 8))
            for i in range(0, length - 1, 4))
        return ''.join(res)

    compressed = convert_formated_hex_to_string(matched)
    del matched, text  # free large intermediates before decompressing
    res = LZString().decompress(compressed)
    return json.loads(res)
def route_session_push():
    """Endpoint: receive an LZ-compressed map push and install it in the session.

    Returns either an error response from check_for_session_errors or a
    JSON '{"result": "OK"}' acknowledgement.
    """
    h = int(request.args.get('i'), 16)
    e = check_for_session_errors(h)
    if e:
        return e
    print('received session push')
    data = request.get_data()
    charset = request.mimetype_params.get('charset') or 'UTF-8'
    # Bug fix: bytes.decode(encoding, errors) — 'utf-16' was being passed as
    # the *errors* handler, which would raise LookupError on the first decode
    # error instead of decoding anything as UTF-16.
    jd = LZString().decompressFromUTF16(data.decode(charset))
    print('decompressed session push')
    jdl = json.loads(jd)
    d = jdl['map']
    d['sidf_state'] = 0
    m = Transit.Map(0)
    m.from_json(d)
    m.sidf_state = 0
    # Keep a reference to the old map so known hexagons can be carried over.
    om = session_manager.auth_by_key(h).session.map
    # Install the new map.
    session_manager.auth_by_key(h).session.map = m
    # Copy hexagon data across for stations at the same location in both maps.
    for service in om.services:
        for station in service.stations:
            if station.hexagons_known:
                for service_n in m.services:
                    for station_n in service_n.stations:
                        if station.location == station_n.location:
                            station_n.set_hexagons(station.hexagons)
    # Copy user settings
    # TODO clean this up!
    settings = jdl['settings']
    m.settings.from_json(settings)
    return json.dumps({"result": "OK"})
help='Move party to the left') move_group.add_argument('-r', '--right', action='store_true', help='Move party to the right') args = parser.parse_args(args=None if sys.argv[1:] else ['--help']) if args.file: try: save = open(args.file).read().strip() except FileNotFoundError: print("No such file") elif args.input: save = args.input data = json.loads(LZString().decompresFromBase64(save)) # +worldX moves to the right # -worldX moves to the left if args.left: move_amount = -5000 elif args.right: move_amount = 5000 for adventurer in data['adventurers']: adventurer['positionComponent']['worldX'] += move_amount print(LZString().compressToBase64(json.dumps(data)))
data["edges_bbox"]["maxy"] = keepout["max"][1]
topSilk = readGerber(topSilkPath, yInvert)
data["silkscreen"]["F"] = topSilk["json"]
footprints = readFootprints(bomPath, cplPath, footprintsPath, yInvert)
data["footprints"] = footprints["footprints"]
data["bom"] = footprints["bom"]

print("* Compressing the data...")
jsonText = json.dumps(data)

print("* Adding the pcb image...")
with open(renderedPcbPath, mode='rb') as f:
    renderedPcb = f.read()
html = html.replace('___PCBDPI___', renderedPcbDpi)
# Bug fix: base64.b64encode returns bytes on Python 3; decode before
# concatenating into the str template.
html = html.replace(
    '___PCBIMAGE___',
    'data:image/png;base64,' + base64.b64encode(renderedPcb).decode('ascii'))

print("* Adding the BOM data...")
# NOTE(review): the common pypi lzstring port names this compressToBase64 —
# confirm the imported LZString implementation really exposes compress_to_base64.
jsonBase64 = LZString().compress_to_base64(jsonText)
html = html.replace('___PCBDATA___', jsonBase64)

print("* Writing the output BOM file...")
with open(iBomFilePath, "wt") as wf:
    # The 'with' block closes the file; the explicit wf.close() was redundant.
    wf.write(html)
# Bug fix: 'print "Done!"' was a Python 2 print statement mixed into code
# that otherwise uses the print() function (a SyntaxError on Python 3).
print("Done!")
def crawl_chapter(chapter_info):
    """Crawl one chapter: decode the packed image list, then download each page.

    Args:
        chapter_info: dict with "title" and "url" keys.
    """
    print("crawl_chapter %s" % chapter_info["title"])
    html = None
    url = chapter_info["url"]
    title = chapter_info["title"]
    # download html
    req = request.Request(url)
    req.add_header(
        'User-Agent',
        'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25'
    )
    with request.urlopen(req) as response:
        if response.status != 200:
            print("download failed! status = %s, reason = %s" %
                  (response.status, response.reason))
            return
        html = response.read()
    # get script node
    soup = BeautifulSoup(html, 'html.parser')
    node = soup.find('script', text=re.compile(r'^window'))
    if node is None:
        print("get script node failed!")
        return
    script = node.get_text()
    if script is None:
        print("get script text failed!")
        return
    # get dict_json (the identifier template) and compress_str (the wordlist)
    match_result = re.match(r'.*({".*"}).*\'([0-9a-zA-Z/\+=]+)\'.*', script)
    if match_result is None:
        print("re.match failed! script = %s" % script)
        return
    dict_json = match_result.group(1)
    compress_str = match_result.group(2)
    # decompress compress_str
    lzstring = LZString()
    # Bug fix: the method was misspelled 'decompresFromBase64'
    # (AttributeError at runtime).
    array_str = lzstring.decompressFromBase64(compress_str)
    if array_str is None:
        print("lzstring.decompressFromBase64 failed! compress_str = %s" %
              compress_str)
        return
    # split array_str to array
    comic_info_arr = array_str.split('|')
    if comic_info_arr is None or len(comic_info_arr) == 0:
        print("split array_str failed! \narray_str = %s" % array_str)
        return
    # build comic_info_dict: base-62 identifier -> word
    comic_info_dict = {}
    index_to_key_map = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    def index_to_key(index):
        """Convert a wordlist index to its base-62 identifier string."""
        key = ""
        if index == 0:
            return "0"
        while index > 0:
            key = index_to_key_map[index % 62] + key
            index = int(index / 62)
        return key

    for index in range(0, len(comic_info_arr)):
        key = index_to_key(index)
        if comic_info_arr[index] != "":
            comic_info_dict[key] = comic_info_arr[index]
        else:
            # An empty slot means the identifier stands for itself.
            comic_info_dict[key] = key

    # replace identifiers inside dict_json with their mapped words
    def replace(match):
        """Regex callback: map one identifier to its word."""
        return comic_info_dict[match.group(0)]

    comic_info_json = re.sub(r'\b\w+\b', replace, dict_json)
    if comic_info_json is None:
        print("re.sub failed! dict_json = %s" % dict_json)
        return
    # parse json to comic_info
    comic_info = json.loads(comic_info_json)
    if comic_info is None:
        print("json.loads failed! comic_info_json = %s" % comic_info_json)
        return
    if comic_info["images"] is None or len(comic_info["images"]) == 0:
        # Bug fix: comic_info is a dict, so attribute access
        # (comic_info.images) raised AttributeError in this error path.
        print("comic_info.images is None or empty! \ncomic_info.images = %s" %
              str(comic_info["images"]))
        return
    # add host
    comic_images = comic_info["images"]
    for i in range(0, len(comic_images)):
        comic_images[i] = "http://i.hamreus.com:8080" + comic_images[i]
    print("image count = %s" % len(comic_images))
    # download images, skipping any already on disk
    image_dir = "./images/" + title
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    index = 0
    while index < len(comic_images):
        image_url = comic_images[index]
        image_name = image_url.split('/')[-1]
        image_path = image_dir + "/" + image_name
        if os.path.exists(image_path):
            index += 1
            continue
        req = request.Request(image_url)
        req.add_header("Accept", "image/webp,image/*,*/*;q=0.8")
        req.add_header("Referer", url)
        req.add_header("Connection", "keep-alive")
        req.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
        )
        try:
            image_data = request.urlopen(req).read()
        except error.HTTPError as err:
            if err.code == 522:
                # 522: origin timeout — back off and retry the same image.
                print("%d, %s timeout" % (index, image_name))
                time.sleep(5)
                continue
            else:
                raise err
        else:
            with open(image_path, "wb") as image_file:
                image_file.write(image_data)
            index += 1
def compress_data(data):
    """Serialize *data* to JSON (with bare NaN patched to null) and LZ-compress
    the result to base64."""
    text = json.dumps(data)
    # Round-trip through UTF-8 with errors ignored to drop any unencodable
    # code points before compression.
    text = text.encode("utf-8", "ignore").decode("utf-8")
    # json.dumps emits bare NaN, which is not valid JSON; patch it to null.
    # NOTE(review): this plain replace would also touch "NaN" inside string
    # values — acceptable for the current payloads, apparently.
    patched = text.replace("NaN", "null")
    return LZString().compressToBase64(patched)
# Byte offsets/lengths into the packed .bin payload (offsets[0] unused here).
name_offset = offsets[1]
name_length = offsets[2]
jpeg_offset = offsets[3]
jpeg_length = offsets[4]
bytecode_offset = offsets[5]
bytecode_length = offsets[6]
source_offset = offsets[7]
source_length = offsets[8]
#print('offsets: {}'.format(offsets))
# Use the file name (minus extension) as the effect id.
# NOTE(review): 'id' shadows the builtin.
id = filename.replace('.bin', '')
name = data[name_offset:name_offset + name_length].decode('UTF-8')
jpg = data[jpeg_offset:jpeg_offset + jpeg_length]
bytecode = data[bytecode_offset:bytecode_offset + bytecode_length]
sourcecode = data[source_offset:source_offset + source_length]
# Source code is stored LZ-compressed as a byte array.
decoded_source = LZString.decompressFromUint8Array(sourcecode)
epe = {'name': name, 'id': id}
epe['sources'] = json.loads(decoded_source)
# Preview jpeg is embedded base64 in the .epe.
epe['preview'] = base64.b64encode(jpg).decode('UTF-8')
write_epe(epe)
if extract:
    # Optionally also dump the preview jpeg and the decompressed source.
    jpg_fname = make_file_name(name, 'jpeg')
    with open(jpg_fname, 'wb') as fp:
        fp.write(jpg)
    print('extracted {}'.format(jpg_fname))
    source_fname = make_file_name(name, 'js')
    with open(source_fname, 'w') as fp:
from lzstring import LZString

if __name__ == '__main__':
    # Round-trip demo comparing this port's output with the original JS library.
    s = 'Žluťoučký kůň úpěl ďábelské ódy!'
    # generated with original js lib
    jsLzStringBase64 = 'r6ABsK6KaAD2aLCADWBfgBPQ9oCAlAZAvgDobEARlB4QAEOAjAUxAGd4BL5AZ4BMBPAQiA=='
    jsLzStringBase64Json = 'N4Ig5gNg9gzjCGAnAniAXKALgS0xApuiPgB7wC2ADgQASSwIogA0IA4tHACLYBu6WXASIBlFu04wAMthiYBEhgFEAdpiYYQASS6i2AWSniRURJgCCMPYfEcGAFXyJyozPBUATJB5pt8Kp3gIbAAvfB99JABrAFdKGil3MBj4MEJWcwBjRCgVZBc0EBEDIwyAIzLEfH5CrREAeRoADiaAdgBONABGdqaANltJLnwAMwVKJHgicxpyfDcAWnJouJoIJJS05hoYmHCaTCgabPx4THxZlfj1lWTU/BgaGBjMgAsaeEeuKEyAISgoFEAHSDBgifD4cwQGBQdAAbXYNlYAA0bABdAC+rDscHBhEKy0QsUoIAxZLJQA'
    print('String for encode: ' + s)
    print()
    print('Compress to base64:')
    base2 = LZString.compressToBase64(s)
    print('result: ' + base2)
    print('result js: ' + jsLzStringBase64)
    # Expect the Python port to match the JS output byte-for-byte.
    print('equals: ' + str(base2 == jsLzStringBase64))
    print()
    print('Decompress from base64:')
    print('result: ' + LZString.decompressFromBase64(base2))
    print('result from js: ' + LZString.decompressFromBase64(jsLzStringBase64))
    print()
    # Second fixture: a larger, structured JSON document.
    jsonString = '{"glossary":{"title":"example glossary","GlossDiv":{"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}}}}'
    print('Compress json to base64:')
def post(self):
    """Dispatch a JSON file-service command: poke / ls / mkdir / put / get.

    The request body is JSON with a 'cmd' key plus per-command arguments.
    SECURITY NOTE(review): paths come straight from the client with no
    sandboxing — any reachable file can be read or written.
    """
    args = json.loads(self.request.body.decode())
    cmd = args['cmd']
    if cmd == "poke":
        # Report existence/type of a path.
        filePath = args["filePath"]
        out = {}
        out["exists"] = str(os.path.exists(filePath))
        out["isDir"] = str(os.path.isdir(filePath))
        self.write(json.dumps(out))
    # list directory
    elif cmd == "ls":
        out = []
        requestedData = args["plzSend"]
        filePath = args["filePath"]
        files = glob.glob(filePath + "*")
        for file in files:
            addMe = {'name': file, 'isDir': str(os.path.isdir(file))}
            if "size" in requestedData:
                addMe["size"] = os.path.getsize(file)
            out.append(addMe)
        self.write(json.dumps(out))
    # make directory
    elif cmd == "mkdir":
        filePath = args["filePath"]
        name = args["name"]
        try:
            # Bug fix: was os.makedirs(filepath + name) — 'filepath' (lower
            # case) is undefined, so every mkdir raised NameError.
            os.makedirs(filePath + name)
            self.write("created")
        except OSError as exception:
            if exception.errno == errno.EEXIST:
                self.write("exists")
            else:
                raise
    # save file
    elif cmd == "put":
        filePath = args["filePath"]
        Path(filePath).touch()
        writeType = args["writeType"]
        data = args["data"]
        # Bug fix: 'dataType' is optional in the request (see the previously
        # commented-out guard); unconditional indexing raised KeyError.
        dataType = args.get("dataType", "")
        if dataType == "png":
            # Payload is LZ-compressed base64 image data.
            fullData = LZString.decompress(data)
            # Bug fix: base64.decodestring was deprecated and removed in
            # Python 3.9; b64decode is the supported equivalent.
            imgdata = base64.b64decode(fullData.encode())
            with open(filePath, "wb") as file:
                file.write(imgdata)
        else:
            with open(filePath, writeType) as file:
                file.write(data)
    # get file
    elif cmd == "get":
        filePath = args["filePath"]
        try:
            with open(filePath) as file:
                self.write(file.read())
        except IOError:
            raise
    else:
        self.write("Hello, world")