Example 1
def load_data_from_request(request):
    data_res: Dict[str, Any] = {"data": {}, "body": None}
    if request.method == "POST":
        if request.content_type == "application/json":
            data = request.body
            try:
                data_res["body"] = {**json.loads(request.body)}
            except (json.JSONDecodeError, TypeError):
                pass
        elif request.content_type == "text/plain":
            data = request.body
        else:
            data = request.POST.get("data")
    else:
        data = request.GET.get("data")
    if not data:
        return None

    # add the data in sentry's scope in case there's an exception
    with push_scope() as scope:
        scope.set_context("data", data)

    compression = (request.GET.get("compression")
                   or request.POST.get("compression")
                   or request.headers.get("content-encoding", ""))
    compression = compression.lower()

    if compression == "gzip":
        data = gzip.decompress(data)

    if compression == "lz64":
        if isinstance(data, str):
            data = lzstring.LZString().decompressFromBase64(
                data.replace(" ", "+"))
        else:
            data = lzstring.LZString().decompressFromBase64(
                data.decode().replace(" ", "+"))
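        # round-trip through UTF-16 with "surrogatepass" to normalize any surrogate pairs left by the decompression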
        data = data.encode("utf-16", "surrogatepass").decode("utf-16")

    # Is it plain JSON?
    try:
        # parse_constant gets called in case of NaN, Infinity etc
        # default behaviour is to put those into the DB directly
        # but we just want it to return None
        data = json.loads(data, parse_constant=lambda x: None)
    except json.JSONDecodeError:
        # if not, it's probably base64 encoded from other libraries
        data = base64_to_json(data)
    data_res["data"] = data
    # FIXME: data can also be an array, function assumes it's either None or a dictionary.
    return data_res
Example 2
def generate_cexplore_url(src: str, asm: str) -> str:
    # Base state url
    state = 'OYLghAFBqd5QCxAYwPYBMCmBRdBLAF1QCcAaPECAM1QDsCBlZAQwBtMQBGAZlICsupVs1qgA+hOSkAzpnbICeOpUy10AYVSsArgFtaIAEwAGUqvQAZPLUwA5PQCNMxEADZSAB1TTCS2pp19I1MvH0U6Kxt7XScXd1l5cNoGAmZiAgC9AxMZOUwFPxS0gki7R2c3GVT0zKCc6WqS6zKYitcAShlUbWJkDgByAFJDbmtkHSwAamHDVUUCAE8AOgQZweMAQWHR2nHtKZmqbV2k6RW1ze2xicxpw0MPYQXnc/v1rZHr/duZ6WRiPAeAivQzvd4AenBkw26HQk3UkzQUwQzlu/U6rBA/QArP1SAZ+sY8agsQjpN1ej8Rpw8QQsUTOhAkGhdB48OwyBQICy2RyQARdMgxMxgA5kFIqOyCM5pJQHPS8Q5rGkFliaaQWbo5gB5WisVWEvFYXQiYDsBWkfDEfKKABumFlhrMAA98tppWq8dZpZinaw8A5iCrNFhPaQCADdJ7OjR6Ew2BxOAAWfiCYSiEASMRSf0OWWQTqoIF+R14hI2vwqNS1AycMxqUrRWKCUK+Og1lveNu0RvlFx18sFOhFGpaLKCQdJEdNKJ9ieNDsDxq91r9zrknp9LgYrG4/EWkn9AVCkVi5CTCC4QgkO7cOvw1Cs9nOW+cdrw2kK9qdADWIG4xhLJwrjcNwAAchggUmADsJjYtiQhYkmeJRpwximASRKkIeZYgKYdKGoyMBQMRTIgPgVBUOQlCxowLDmpwdasAgspMSxFFUIsHgcKYxAsUYpB8dIHFcTxO44nimHEliAAieCUZMx7CqK4qTLa0iIswBCfoRnQoswWAuBAv4gEm3BLDB3BmcYrjQQAnA5xhgUxSEoSA0HQUs3DQQBrhgWZkHQWhYH7k6OEyHh4ZfqQcCwEg3QEB47rUdyj68hUdaYPgRD9qQtHxgxdYAO5Bh40aIRJoVYYe2ycJMRWEAgimCspZ46QyemYAZFTGRVyGkFG9xLK4kF2Xe2K2dwhhgXZhgplJ2FYrh+FfiZSbYksM1JjwjHbR5SbGAhvrcJJB5LVFhExaRzIYDg2UkCl+X0YmvACIYQimiAzDSLIMT6rkiSVhA5gdoYCHmCuzZ1q2SSgwhMN+JDGUAxWw4LmOdQIZOhTLs0TbIw0xRw1UxRI2uXSbgMAAC0KwpM32/Q4+qIrdkwota6IVXuC2HhsP2YH9CyTBulK3oY7Wc2RPLPpylDS3yHgLCwBB5VKMpyhaSq0CqYaajqeoGlhxqmuaTpWhW9qOlhmCusg7oDOq3pyBauZBsQCwhgMWERngUb9DSMZ0HRCZcCmb0fRmDMC0zCxCAG+a9UWpzndjyjA9WGO1vWlh43O0NdrDmedmEiO56uE55EOyTo4EWep9XpNl1DJOjrX86N7O5dvhTlJGOJ3Nnf0ysXleOVi6QD5PhyYvvnzjP6hL36kH+IxLHZSZzWBI0jNih3bX1p1hSnkUER1FXi1V0n9IvnT2sQPjKEmQA'

    state = lzstring.LZString().decompressFromBase64(state)
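    # the decompressed state is in rison format; wrapping it in parentheses lets prison.loads parse it as an object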
    data = prison.loads('(' + state + ')')

    # Insert our code in the editors
    data['g'][0]['g'][0]['g'][0]['i']['source'] = src
    data['g'][0]['g'][2]['g'][0]['i']['source'] = asm

    state = risonify(data)
    state = {'z': lzstring.LZString().compressToBase64(state)}
    url = (CEXPLORE_URL + risonify(state))
    return url
Example 3
def get_comic_index_page(comic_id):
    # https://www.manhuagui.com/comic/21175/
    index_url = f"https://www.manhuagui.com/comic/{comic_id}/"
    index_response = net.request(index_url, method="GET")
    result = {
        "chapter_info_list": [],  # 漫画列表信息
    }
    if index_response.status != net.HTTP_RETURN_CODE_SUCCEED:
        raise crawler.CrawlerException(
            crawler.request_failre(index_response.status))
    index_response_content = index_response.data.decode(errors="ignore")
    chapter_info_selector = pq(index_response_content).find("div.chapter")
    if chapter_info_selector.length != 1:
        raise crawler.CrawlerException("页面截取漫画列表失败\n" + index_response_content)
    group_name_selector = chapter_info_selector.find("h4")
    if group_name_selector.length == 0:
        if pq(index_response_content).find("#__VIEWSTATE").length == 1:
            decompress_string = pq(index_response_content).find(
                "#__VIEWSTATE").val()
            if decompress_string:
                decompress_html = lzstring.LZString().decompressFromBase64(
                    decompress_string)
                chapter_info_selector.html(decompress_html)
                group_name_selector = chapter_info_selector.find("h4")
    group_chapter_list_selector = chapter_info_selector.find(".chapter-list")
    if group_name_selector.length != group_chapter_list_selector.length:
        raise crawler.CrawlerException("页面截取章节数量异常\n" + index_response_content)
    for group_index in range(0, group_name_selector.length):
        # get the group name
        group_name = group_name_selector.eq(group_index).text().strip()
        if not group_name:
            raise crawler.CrawlerException(
                "章节信息截取章节名失败\n" + group_name_selector.eq(group_index).html())
        chapter_list_selector = group_chapter_list_selector.eq(
            group_index).find("li")
        if chapter_list_selector.length == 0:
            raise crawler.CrawlerException(
                "章节信息截取章节内容失败\n" +
                group_chapter_list_selector.eq(group_index).html())
        for page_index in range(0, chapter_list_selector.length):
            result_comic_info = {
                "chapter_id": None,  # 章节id
                "chapter_name": None,  # 章节名
                "group_name": group_name,  # 漫画分组名字
            }
            chapter_selector = chapter_list_selector.eq(page_index)
            # get the chapter ID
            page_url = chapter_selector.find("a").attr("href")
            chapter_id = tool.find_sub_string(page_url, f"/comic/{comic_id}/",
                                              ".html")
            if not tool.is_integer(chapter_id):
                raise crawler.CrawlerException(f"页面地址 {page_url} 截取页面id失败")
            result_comic_info["chapter_id"] = int(chapter_id)
            # get the chapter name
            chapter_name = chapter_selector.find("a").attr("title")
            if not chapter_name:
                raise crawler.CrawlerException(f"页面地址 {page_url} 截取章节名失败")
            result_comic_info["chapter_name"] = chapter_name.strip()
            result["chapter_info_list"].append(result_comic_info)
    return result
Example 4
    def test_batch_lzstring(self, patch_process_event_with_plugins):
        data = {
            "api_key": self.team.api_token,
            "batch": [{"type": "capture", "event": "user signed up", "distinct_id": "2"}],
        }

        response = self.client.generic(
            "POST",
            "/batch",
            data=lzstring.LZString().compressToBase64(json.dumps(data)).encode(),
            content_type="application/json",
            HTTP_CONTENT_ENCODING="lz64",
        )

        arguments = self._to_arguments(patch_process_event_with_plugins)
        arguments.pop("now")  # can't compare fakedate
        arguments.pop("sent_at")  # can't compare fakedate
        self.assertDictEqual(
            arguments,
            {
                "distinct_id": "2",
                "ip": "127.0.0.1",
                "site_url": "http://testserver",
                "data": data["batch"][0],
                "team_id": self.team.pk,
            },
        )
Example 5
def compress_json(data):
    """ Take a Python data object. Convert to JSON and compress using lzstring """
    json_string = json.dumps(data).encode('utf-8', 'ignore').decode('utf-8')
    # JSON.parse() doesn't handle `NaN`, but it does handle `null`.
    json_string = json_string.replace('NaN', 'null')
    x = lzstring.LZString()
    return x.compressToBase64(json_string)
Example 6
def load_data_from_request(request):
    data = None
    if request.method == "POST":
        if request.content_type in ["", "text/plain", "application/json"]:
            data = request.body
        else:
            data = request.POST.get("data")
    else:
        data = request.GET.get("data")

    if not data:
        return None

    # add the data in sentry's scope in case there's an exception
    with push_scope() as scope:
        scope.set_context("data", data)

    compression = (request.GET.get("compression")
                   or request.POST.get("compression")
                   or request.headers.get("content-encoding", ""))
    compression = compression.lower()

    if compression == "gzip" or compression == "gzip-js":
        try:
            data = gzip.decompress(data)
        except (EOFError, OSError) as error:
            raise RequestParsingError("Failed to decompress data. %s" %
                                      (str(error)))

    if compression == "lz64":
        if not isinstance(data, str):
            data = data.decode()
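        # '+' in the payload may have been decoded to a space by form/URL parsing; restore it before Base64 decompression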
        data = data.replace(" ", "+")

        data = lzstring.LZString().decompressFromBase64(data)

        if not data:
            raise RequestParsingError("Failed to decompress data.")

        data = data.encode("utf-16", "surrogatepass").decode("utf-16")

    base64_decoded = None
    try:
        base64_decoded = base64_decode(data)
    except Exception:
        pass

    if base64_decoded:
        data = base64_decoded

    try:
        # parse_constant gets called in case of NaN, Infinity etc
        # default behaviour is to put those into the DB directly
        # but we just want it to return None
        data = json.loads(data, parse_constant=lambda x: None)
    except (json.JSONDecodeError, UnicodeDecodeError) as error_main:
        raise RequestParsingError("Invalid JSON: %s" % (str(error_main)))

    # TODO: data can also be an array, function assumes it's either None or a dictionary.
    return data
Example 7
    def get_showing_math(self):
        url = self.webview.get_uri()
        if url == MATHPASTE_URL:
            return ''

        assert url.startswith(MATHPASTE_URL + '#fullmath:')
        url_part = url[len(MATHPASTE_URL + '#fullmath:'):]
        return lzstring.LZString().decompressFromEncodedURIComponent(url_part)
Example 8
def load_data_from_request(request):
    data_res: Dict[str, Any] = {"data": {}, "body": None}
    if request.method == "POST":
        if request.content_type == "application/json":
            data = request.body
            try:
                data_res["body"] = {**json.loads(request.body)}
            except (json.JSONDecodeError, TypeError):
                pass
        else:
            data = request.POST.get("data")
    else:
        data = request.GET.get("data")
    if not data:
        return None

    # add the data in sentry's scope in case there's an exception
    with push_scope() as scope:
        scope.set_context("data", data)

    compression = (request.GET.get("compression")
                   or request.POST.get("compression")
                   or request.headers.get("content-encoding", ""))
    compression = compression.lower()

    if compression == "gzip":
        data = gzip.decompress(data)

    if compression == "lz64":
        if isinstance(data, str):
            data = lzstring.LZString().decompressFromBase64(
                data.replace(" ", "+"))
        else:
            data = lzstring.LZString().decompressFromBase64(
                data.decode().replace(" ", "+"))

    # Is it plain JSON?
    try:
        data = json.loads(data)
    except json.JSONDecodeError:
        # if not, it's probably base64 encoded from other libraries
        data = base64_to_json(data)
    data_res["data"] = data
    # FIXME: data can also be an array, function assumes it's either None or a dictionary.
    return data_res
Example 9
    def __init__(self):
        # key parameters (these appear to be RSA values: modulus n, public exponent e, private exponent d)
        self.n = 14381369433201940027551004981531101389664202480307538229814538636528375530324916784648275342846064200752240371327365243073166608941196474279834403963356704932653126822110289080895929568884163124604293776884771686564362611005901390403422584516977911996792764335664968340999940950783877766880308894172937436007265807781388459052608503532852249921566927524082662862335290648815294524067599773684140383418624232924104541617626926472072688687371264498379146627129959141752404144593087110906042779187111258926263215948749765954791674515143215043275262420498238178981109945690872493507119465577150354577907966282275478587721

        self.e = 11

        self.d = 2614794442400352732282000905732927525393491360055916041784461570277886460059075779026959153244738945591316431150430044195121201625672086232697164356973946351391477604020052560162896285251666022655326141251776670284429565637436616436985924457632347635780502606484539698363625627415250503069147071667806806546731141552133536165716300868116176143089194512198552146222341852047239288533761374997477385711757076090179424976265484248481630409966752085517316503643585036170163261935246828510968077540764809126685647652976268995839874260260636031429660404400853960635269830038496349221493472831942315532337247285831272873499

        self.compressor = lzstring.LZString()
Example 10
    def get_comicbook_item(self):
        soup = self.get_soup(self.source_url)
        name = soup.find('div', {'class': 'book-title'}).h1.text
        desc = soup.find('div', {'id': 'intro-all'}).p.text

        li_list = soup.find('ul', {'class': 'detail-list'}).find_all('li')
        tag_soup = li_list[1].find_all('strong')[0]
        author_soup = li_list[1].find_all('strong')[1]
        author = author_soup.previous_element.a.get('title')
        img = soup.find('div', attrs={'class': 'book-cover'}).p.img
        cover_image_url = img.get('data-src') or img.get('src')
        status = soup.find('li', {'class': 'status'}).span.span.text
        book = self.new_comicbook_item(name=name,
                                       desc=desc,
                                       cover_image_url=cover_image_url,
                                       author=author,
                                       source_url=self.source_url,
                                       status=status)
        for a in tag_soup.previous_element.find_all('a'):
            name = a.get('title')
            href = a.get('href')
            tag = href.replace('/list/', '').replace('/', '')
            book.add_tag(name=name, tag=tag)

        chapter_soup = soup.find('div', {'class': 'chapter'})
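        # adult-gated pages appear to hide the chapter list LZString/Base64-compressed in a hidden __VIEWSTATE input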
        adult_input = soup.find('input', {'id': '__VIEWSTATE'})
        if adult_input is not None:
            adult_encoded_value = adult_input.get('value')
            if len(adult_encoded_value) > 0:
                adult_decoded_value = lzstring.LZString().decompressFromBase64(
                    adult_encoded_value)
                chapter_soup = BeautifulSoup(adult_decoded_value,
                                             'html.parser')
        h4_list = chapter_soup.find_all('h4')
        div_list = chapter_soup.find_all('div', {'class': 'chapter-list'})
        idx = 1
        ext_idx = {}
        for h4, div in zip(h4_list, div_list):
            for ul in div.find_all('ul'):
                for li in reversed(ul.find_all('li')):
                    href = li.a.get('href')
                    title = li.a.get('title')
                    full_url = urljoin(self.SITE_INDEX, href)
                    if h4.text.strip() == '单话' or len(h4_list) == 1:
                        book.add_chapter(chapter_number=idx,
                                         title=title,
                                         source_url=full_url)
                        idx += 1
                    else:
                        name = h4.text.strip()
                        ext_idx.setdefault(name, 1)
                        book.add_chapter(chapter_number=ext_idx[name],
                                         title=title,
                                         ext_name=name,
                                         source_url=full_url)
                        ext_idx[name] += 1
        return book
Example 11
def cmplzstring(data):
    import lzstring
    print(len(data))
    x = lzstring.LZString()
    compressed = x.compress(data)
    print(len(compressed))
    #compressed = x.compressToBase64(data)
    #decomp = x.decompressFromBase64(compressed)
    return compressed
Example 12
def read_uploaded_file(filedata=None,
                       decompress_data=0,
                       overwrite=False,
                       open_date=None,
                       brand=None):
    if not filedata:
        return

    lx = lzstring.LZString()

    if (int(decompress_data) > 0):
        frappe.publish_realtime("tally_import_progress",
                                {"message": "Decompressing"},
                                user=frappe.session.user)

        filedata = lx.decompressFromUTF16(filedata)

        frappe.publish_realtime("tally_import_progress",
                                {"message": "Decompression Complete"},
                                user=frappe.session.user)

    params = json.loads(frappe.form_dict.get("params") or '{}')

    if params.get("overwrite"):
        overwrite = params.get("overwrite")
    if params.get("open_date"):
        open_date = params.get("open_date")
    if params.get("brand"):
        brand = params.get("brand")

    global overwrite_existing
    overwrite_existing = overwrite

    global opening_date
    opening_date = open_date

    brand = brand.replace(" ", "") + ","
    global brand_category
    brand_category = brand.upper().rstrip(",").split(",")

    try:
        xmltodict.parse(filedata, item_depth=5, item_callback=process)
    except ParsingInterrupted:
        frappe.db.rollback()
        return {
            "messages": ["There was a Problem Importing" + ": " + "HG"],
            "error": True
        }

    frappe.db.commit()
    frappe.publish_realtime("tally_import_progress",
                            {"message": "Processed Batch"},
                            user=frappe.session.user)

    return {"messages": "Import Successful", "error": False}
Example 13
 def handle_md5_value(self):
     """
     使用正则表达式匹配出加密字符串,并通过lzstring解密
     :return:明文md5值
     """
     md5_search = re.compile(r"preInit\|(.*?)\|block_cc")
     self.handle_html()
     x = lzstring.LZString()
     self.js_decode_value = x.decompressFromBase64(self.js_value)
     print(self.js_decode_value)
     self.md5_value = md5_search.search(self.js_decode_value).group(1)
Example 14
def compress(s):
    """
    Convert to a string and compress. lzstring is a special-purpose compressor,
    more suitable for short strings than typical compressors.
    
    :param s:
    :return:
    """
    
    s = ''.join(map(str, s))
    return lzstring.LZString().compress(s)
Example 15
def _load_data(request) -> Optional[Union[Dict, List]]:
    if request.method == "POST":
        if request.content_type == "application/json":
            data = request.body
        else:
            data = request.POST.get("data")
    else:
        data = request.GET.get("data")
    if not data:
        return None

    # add the data in sentry's scope in case there's an exception
    with push_scope() as scope:
        scope.set_context("data", data)

    compression = (
        request.GET.get("compression") or request.POST.get("compression") or request.headers.get("content-encoding", "")
    )
    compression = compression.lower()

    if compression == "gzip":
        data = gzip.decompress(data)

    if compression == "lz64":
        if isinstance(data, str):
            data = lzstring.LZString().decompressFromBase64(data.replace(" ", "+"))
        else:
            data = lzstring.LZString().decompressFromBase64(data.decode().replace(" ", "+"))

    # Is it plain JSON?
    try:
        data = json.loads(data)
    except json.JSONDecodeError:
        # if not, it's probably base64 encoded from other libraries
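        # '+' may have been decoded to spaces in transit; the extra '=' padding avoids "incorrect padding" errors in b64decode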
        data = json.loads(
            base64.b64decode(data.replace(" ", "+") + "===")
            .decode("utf8", "surrogatepass")
            .encode("utf-16", "surrogatepass")
        )
    # FIXME: data can also be an array, function assumes it's either None or a dictionary.
    return data
Example 16
def revisit_debug():  # pragma: no cover
    global revisit_online
    global slow_mode
    global facilities_file
    global compressed_facilities
    global lzs
    import lzstring
    revisit_online = True
    slow_mode = False
    facilities_file = 'tests/python/fake_revisit_facilities.json'
    with open(facilities_file, 'rb') as facilities:
        compressed_facilities = facilities.read()
    lzs = lzstring.LZString()
Example 17
def html_to_file(cids, ou):
    co.init()
    print(co.Style.BRIGHT + 'Ready to crawl ' + co.Fore.RED + f'{len(cids)}' +
          co.Fore.RESET + ' contest(s)' + co.Style.RESET_ALL)
    submissions = sum(map(get_submissions, cids), [])[::-1]
    print(co.Style.BRIGHT + 'total submission(s): ' + co.Fore.MAGENTA +
          f'{len(submissions)}' + co.Style.RESET_ALL)

    parsed = parse_submissions(submissions)
    parsed_json = json.dumps(parsed)
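    # compress the parsed submissions into a single Base64 blob that gets embedded in the HTML template below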
    lz = lzstring.LZString()
    b64 = lz.compressToBase64(parsed_json)

    ou.write(html % b64)
Example 18
 def get_pages_opts(self):
     res = self.client.get(self.uri)
     raw_content = res.text
     res = re.search(
         r'<script type="text\/javascript">window\["\\x65\\x76\\x61\\x6c"\](.*\)) <\/script>', raw_content).group(1)
     lz_encoded = re.search(
         r"'([A-Za-z0-9+/=]+)'\['\\x73\\x70\\x6c\\x69\\x63'\]\('\\x7c'\)", res).group(1)
     lz_decoded = lzstring.LZString().decompressFromBase64(lz_encoded)
     res = re.sub(r"'([A-Za-z0-9+/=]+)'\['\\x73\\x70\\x6c\\x69\\x63'\]\('\\x7c'\)",
                  "'%s'.split('|')" % (lz_decoded), res)
     code = node.get_node_output(res)
     pages_opts = json.loads(
         re.search(r'^SMH.imgData\((.*)\)\.preInit\(\);$', code).group(1))
     return pages_opts
Example 19
    def get_volumes(self, start_from):
        comic_soup = self.client.get_soup(self.uri)
        logger.debug('soup=' + str(comic_soup))
        self.book_title = comic_soup.select_one(
            '.book-detail > .book-title > h1').text
        self.book_status = comic_soup.select_one(
            '.book-detail > .detail-list > .status > span > span').text
        anchors = comic_soup.select('.chapter-list > ul > li > a')
        if not anchors:  # for adult only pages, decrypt the chapters
            soup = lzstring.LZString().decompressFromBase64(
                comic_soup.find('input', id='__VIEWSTATE')['value'])
            anchors = bs4.BeautifulSoup(
                soup, 'html.parser').select('.chapter-list > ul > li > a')
        logger.debug('\ttitle=' + self.book_title)
        logger.debug('\tstatus=' + self.book_status)
        logger.debug('\tvols=' + str(anchors))
        print("\r== Checking <{}> == {: >80s}".format(self.book_title, ''),
              end='')
        if len(anchors) == 0:  # taken down ('已下架') or errors
            if self.book_status == '已下架':
                self.save_record(self.client.opts['record_conf'], {
                    'name': '',
                    'number': 0
                })
            else:
                logger.error('\nFailed to parse volumes!')
            return

        sorted_volume = []
        for anchor in anchors:
            vol = {}
            vol['link'] = anchor.get('href')
            vol['name'] = anchor.get('title')
            result = re.search(r"\d+", vol['name'])
            vol['number'] = int(result[0]) if result else 0
            sorted_volume.append(vol)
        sorted_volume.sort(key=lambda x: x['number'])
        for vol in sorted_volume:
            self.save_record(self.client.opts['record_conf'], vol)
            if vol['number'] < int(start_from):
                continue
            volume = MHGVolume(urllib.parse.urljoin(self.uri, vol['link']),
                               self.book_title, vol['name'], self.client)
            if volume.is_skip():
                continue
            self.print_newline()
            print(volume)
            yield volume
Example 20
def get_result_page(from_date, to_date, page, qi):
    params = json.dumps(PARAMS_TEMPLATE) % {
        "from_date": from_date,
        "to_date": to_date,
        "page": str(page),
        "page_size": PAGE_SIZE,
        "qi": qi
    }
    print(str(page) + '00')
    enc = lzstring.LZString()
    qz = enc.compressToBase64(params)
    r = requests.post(SEARCH_URL, data={'qz': qz})
    result = json.loads(r.content)
    # print("request: " + params)
    print(r.content)
    return result
Example 21
def solve_js(html):
    js = '(function' + re.findall('function(.*?)</script>', html)[0]
    encrpyted_js = js.split(',')[-3][1:-15]
    decrypted_js = lzstring.LZString().decompressFromBase64(encrpyted_js)
    original_js = js.split(',')
    original_js[-3] = "'" + decrypted_js + "'.split('|')"
    packed_js = 'eval(' + ','.join(original_js) + ')'
    # print('packed_js', packed_js)
    unpack = packer.unpack(packed_js)
    print(unpack)
    # js_result = jsbeautifier.beautify(unpack)
    # print('js_result', js_result)
    imgData = re.findall("SMH\.imgData\((.*?)\)\.preInit\(\)\;", unpack)[0]
    res = json.loads(imgData)
    print(res['bname'])
    return res
Example 22
def generate_cexplore_url(src: str, asm: str) -> str:
    # Base state url
    # state = 'OYLghAFBqd5QCxAYwPYBMCmBRdBLAF1QCcAaPECAM1QDsCBlZAQwBtMQBGAZlICsupVs1qgA+hOSkAzpnbICeOpUy10AYVSsArgFtaIAEwAGUqvQAZPLUwA5PQCNMxEADZSAB1TTCS2pp19I1MvH0U6Kxt7XScXd1l5cNoGAmZiAgC9AxMZOUwFPxS0gki7R2c3GVT0zKCc6WqS6zKYitcAShlUbWJkDgByAFJDbmtkHSwAamHDVUUCAE8AOgQZweMAQWHR2nHtKZmqbV2k6RW1ze2xicxpw0MPYQXnc/v1rZHr/duZ6WRiPAeAivQzvd4AenBkw26HQk3UkzQUwQzlu/U6rBA/QArP1SAZ+sY8agsQjpN1ej8Rpw8QQsUTOhAkGhdB48OwyBQICy2RyQARdMgxMxgA5kFIqOyCM5pJQHPS8Q5rGkFliaaQWbo5gB5WisVWEvFYXQiYDsBWkfDEfKKABumFlhrMAA98tppWq8dZpZinaw8A5iCrNFhPaQCADdJ7OjR6Ew2BxOAAWfiCYSiEASMRSf0OWWQTqoIF+R14hI2vwqNS1AycMxqUrRWKCUK+Og1lveNu0RvlFx18sFOhFGpaLKCQdJEdNKJ9ieNDsDxq91r9zrknp9LgYrG4/EWkn9AVCkVi5CTCC4QgkO7cOvw1Cs9nOW+cdrw2kK9qdADWIG4xhLJwrjcNwAAchggUmADsJjYtiQhYkmeJRpwximASRKkIeZYgKYdKGoyMBQMRTIgPgVBUOQlCxowLDmpwdasAgspMSxFFUIsHgcKYxAsUYpB8dIHFcTxO44nimHEliAAieCUZMx7CqK4qTLa0iIswBCfoRnQoswWAuBAv4gEm3BLDB3BmcYrjQQAnA5xhgUxSEoSA0HQUs3DQQBrhgWZkHQWhYH7k6OEyHh4ZfqQcCwEg3QEB47rUdyj68hUdaYPgRD9qQtHxgxdYAO5Bh40aIRJoVYYe2ycJMRWEAgimCspZ46QyemYAZFTGRVyGkFG9xLK4kF2Xe2K2dwhhgXZhgplJ2FYrh+FfiZSbYksM1JjwjHbR5SbGAhvrcJJB5LVFhExaRzIYDg2UkCl+X0YmvACIYQimiAzDSLIMT6rkiSVhA5gdoYCHmCuzZ1q2SSgwhMN+JDGUAxWw4LmOdQIZOhTLs0TbIw0xRw1UxRI2uXSbgMAAC0KwpM32/Q4+qIrdkwota6IVXuC2HhsP2YH9CyTBulK3oY7Wc2RPLPpylDS3yHgLCwBB5VKMpyhaSq0CqYaajqeoGlhxqmuaTpWhW9qOlhmCusg7oDOq3pyBauZBsQCwhgMWERngUb9DSMZ0HRCZcCmb0fRmDMC0zCxCAG+a9UWpzndjyjA9WGO1vWlh43O0NdrDmedmEiO56uE55EOyTo4EWep9XpNl1DJOjrX86N7O5dvhTlJGOJ3Nnf0ysXleOVi6QD5PhyYvvnzjP6hL36kH+IxLHZSZzWBI0jNih3bX1p1hSnkUER1FXi1V0n9IvnT2sQPjKEmQA'

    # state = lzstring.LZString().decompressFromBase64(state)
    # print(state)
    state = "g:!((g:!((g:!((h:codeEditor,i:(fontScale:13,j:1,lang:___c,selection:(endColumn:20,endLineNumber:6,positionColumn:20,positionLineNumber:6,selectionStartColumn:20,selectionStartLineNumber:6,startColumn:20,startLineNumber:6),source:'%23include+%22entity.h%22%0A%23include+%22functions.h%22%0A%23include+%22player.h%22%0A%23include+%22script.h%22%0A%0A//+Add+C+code+here+'),l:'5',n:'0',o:'C+source+%231',t:'0'),(h:compiler,i:(compiler:tmc_agbcc,filters:(b:'0',binary:'1',commentOnly:'0',demangle:'0',directives:'0',execute:'1',intel:'0',libraryCode:'1',trim:'1'),fontScale:14,j:1,lang:___c,libs:!(),options:'-O2',selection:(endColumn:1,endLineNumber:1,positionColumn:1,positionLineNumber:1,selectionStartColumn:1,selectionStartLineNumber:1,startColumn:1,startLineNumber:1),source:1),l:'5',n:'0',o:'tmc_agbcc+(Editor+%231,+Compiler+%231)+C',t:'0')),k:30.16338263472055,l:'4',m:100,n:'0',o:'',s:0,t:'0'),(g:!((g:!((h:diff,i:(fontScale:11,lhs:1,lhsdifftype:0,rhs:2,rhsdifftype:0),l:'5',n:'0',o:'Diff+tmc_agbcc+vs+cat',t:'0')),header:(),k:43.47343067999081,l:'4',m:77.37306843267108,n:'0',o:'',s:0,t:'0'),(g:!((h:output,i:(compiler:1,editor:1,fontScale:11,wrap:'1'),l:'5',n:'0',o:'%231+with+tmc_agbcc',t:'0')),header:(),l:'4',m:22.626931567328924,n:'0',o:'',s:0,t:'0')),k:45.89413114177405,l:'3',n:'0',o:'',t:'0'),(g:!((h:codeEditor,i:(fontScale:13,j:2,lang:assembly,selection:(endColumn:25,endLineNumber:1,positionColumn:25,positionLineNumber:1,selectionStartColumn:25,selectionStartLineNumber:1,startColumn:25,startLineNumber:1),source:'@+Add+assembly+code+here'),l:'5',n:'0',o:'Assembly+source+%232',t:'0'),(h:compiler,i:(compiler:pycat,filters:(b:'0',binary:'1',commentOnly:'0',demangle:'0',directives:'0',execute:'1',intel:'0',libraryCode:'0',trim:'1'),fontScale:14,j:2,lang:assembly,libs:!(),options:'',selection:(endColumn:1,endLineNumber:1,positionColumn:1,positionLineNumber:1,selectionStartColumn:1,selectionStartLineNumber:1,startColumn:1,startLineNumber:1),source:2),l:'5',n:'0',o:'cat+(Editor+%232,+Compiler+%232)+Assembly',t:'0')),k:23.94248622350541,l:'4',n:'0',o:'',s:0,t:'0')),l:'2',n:'0',o:'',t:'0')),version:4"
    data = prison.loads('(' + state + ')')

    # Insert our code in the editors
    data['g'][0]['g'][0]['g'][0]['i']['source'] = src
    data['g'][0]['g'][2]['g'][0]['i']['source'] = asm

    state = risonify(data)
    state = {'z': lzstring.LZString().compressToBase64(state)}
    url = (CEXPLORE_URL + risonify(state))
    return url
Example 23
def parse_js(html):
    text = html.replace('window["\\x65\\x76\\x61\\x6c"]', "eval")
    text = text.replace('\\x73\\x70\\x6c\\x69\\x63', "split")
    reg = re.compile(r'eval(.*?)</script>')
    code = reg.search(text).group(1)
    reg = re.compile("'(D.*?[\=]+)'")
    undecode = reg.search(code).group(1)
    x = lzstring.LZString()
    decode_str = x.decompressFromBase64(undecode)
    code = code.replace(undecode, decode_str)
    code = "function getinfo() { return " + code + "; }"
    ctx = execjs.compile(code)
    info = ctx.call("getinfo")
    reg = re.compile(r'\{.*\}')
    data = reg.search(info).group()
    json_data = json.loads(data)
    # print(json_data)
    return json_data
Example 24
def armyCode(code):
    decoded = lzstring.LZString().decompressFromBase64(code).split('|')
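    # fields inferred from the usage below: [0] army id, [1] points, [2] title, [3] unit groups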

    faction = Faction.query.filter_by(army_id=decoded[0]).one()
    groups = breakGroups(decoded[3])
    units = []

    for group in groups:
        units.append(group.split('!'))

    units.pop()
    units.pop()

    armyList = {'Title': decoded[2], 'Points': decoded[1], 'Units': units}

    return render_template('code_info.html',
                           armyList=armyList,
                           faction=faction)
Example 25
    def test_debug_post_revisit(self):
        body = {
            'uuid': 'a',
            'name': 'b',
            'properties': {},
            'coordinates': [0, 0],
        }
        response = self.fetch('/debug/facilities',
                              method='POST',
                              body=json_encode(body),
                              _disable_xsrf=False)
        self.assertEqual(response.code, 201, msg=response.body)

        facility_response = self.fetch('/debug/facilities')
        lzs = lzstring.LZString()
        facility_json = json_decode(facility_response.body)
        compressed = facility_json['facilities']['children']['wn']['data'][0]
        facilities = lzs.decompressFromUTF16(compressed)
        self.assertEqual(json_decode(facilities)[-1]['name'], 'b')
Example 26
 def pasteFromClipboard(self):
     temp = Tk()
     try:
         self.dataTextEdit.setPlainText('')
         clipboardData = temp.selection_get(
             selection="CLIPBOARD").splitlines()
         if 'Rarity' not in clipboardData[0]: return
         itemName = clipboardData[1]
         data = {
             "attributes.league": self.leagueName,
             "info.tokenized.fullName": itemName
         }
         self.dataTextEdit.setPlainText(itemName)
         lz_string = lzstring.LZString()
         data = lz_string.compressToBase64(unicode(json.dumps(data)))
         subprocess.Popen(self.browserPath +
                          " https://poeapp.com/#/search/" + data,
                          shell=True)
     except Exception:
         pass
Example 27
def main():
    '''
    Main method. Runs the program if it is used standalone (rather than as an exported library).
    '''

    parser = _get_args()
    # w3 is needed below for the Registry contract calls, so keep it initialized
    prov = Web3.HTTPProvider(parser.rpc_url)
    w3 = Web3(prov)

    with open('./compiles/Registry.abi','r') as f:
        abi = json.loads(f.read())

    # Compress input with LZ
    with open(parser.filepath, 'r') as f:
        compressor = lzstring.LZString()
        upload_data = f.read()
        compressed_data = compressor.compressToUTF16(upload_data)
        print("Upload Data Length: {} | Compressed Data Length: {}".format(len(upload_data), len(compressed_data)))

    num_chunks = math.ceil(len(compressed_data) / BYTES_PER_CHUNK )
    print("Total length of input {}, Number chunks {}".format(len(upload_data),num_chunks))

    registry = w3.eth.contract(address=parser.reg_addr, abi=abi)

    out_compressed_len = registry.functions.getLen(parser.position).call()
    if out_compressed_len != len(compressed_data):
        print("Error out len does not match what it should {} | {}".format(out_compressed_len, len(compressed_data)))
        return
    print("Lens match, is good")

    reconstructor = ''
    for chunk_index in range(0, num_chunks):
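        # each chunk was stored on-chain as UTF-16 bytes; decode and concatenate to rebuild the compressed string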
        this_chunk = registry.functions.get(parser.position,chunk_index).call().decode("utf-16")
        reconstructor += this_chunk

    if not (reconstructor == compressed_data):
        print(reconstructor)
        print(compressed_data)
        print("They don't match")
    else:
        print("They match. Success, everything works.")
Example 28
def UpdateAll(dlroot, url):
    checked_id = re.match(check_re, url).group(2)
    try:
        res = requests.get(request_url % checked_id, proxies=getProxyDic())
        res.raise_for_status()
    except requests.RequestException:
        print('錯誤:可能是沒網路或被ban ip?')
        return
    bs = bs4.BeautifulSoup(res.text, 'html.parser')
    title = bs.select('.book-title h1')[0]
    print('標題: %s' % title.text)
    authors_link = bs.select('a[href^="/author"]')
    authors = []
    for author in authors_link:
        authors.append(author.text)
    authors = '、'.join(authors)
    config_json = generate_config(title.text, authors)
    links = bs.select('.chapter-list a')
    if not links:
        links = bs4.BeautifulSoup(
            lzstring.LZString().decompressFromBase64(
                bs.select('#__VIEWSTATE')[0].attrs.get('value')),
            'html.parser').select('.chapter-list a')
    links.reverse()
    ch_list = []
    for link in links:
        ch_list.append([link.attrs['title'], link.attrs['href']])

    config_writed = False
    for ch in ch_list:
        # pass the config only when downloading the first chapter, then mark it as written
        if not config_writed:
            downloadCh(dlroot, host + ch[1], config_json)
            config_writed = True
        else:
            downloadCh(dlroot, host + ch[1])
        print('延遲5秒...')
        # wait 5 seconds between chapters
        time.sleep(5)

    # Bark push notification
    BarkNotify(title.text, False)
Example 29
	html = infile.read()
	infile.close()
	# parse the html file
	html = lxml.html.fromstring(html)
	logger.info("HTML template file %s read and parsed" % tfilename) 
	# replace the script with src in the local directory to inline
	logger.info("Combining files")
	for node in html.xpath("//script"):
		if node.get("src") != None:
			if os.path.isfile(os.path.join(template_dir, opt.template, node.get("src"))):
				logger.info("Inlining script %s" % os.path.join(template_dir, opt.template, node.get("src")))
				infile = open(os.path.join(template_dir, opt.template, node.get("src")), encoding="utf8")
				code = infile.read()
				if (node.get("src") in ("plotly.js", "xlsx.core.min.js")):
					logger.info("Compressing using lzstring")
					x = lzstring.LZString()
					code = x.compressToBase64(code)
					code = "compressed_codes.push('" + code + "');"
				node.text = code
				infile.close()
				del node.attrib["src"]
	# replace the stylesheet links with inline versions
	for node in html.xpath("//link[@rel='stylesheet' and @type='text/css']"):
		if node.get("href") != None and os.path.isfile(os.path.join(template_dir, opt.template, node.get("href"))):
			logger.info("Inlining stylesheet %s" % os.path.join(template_dir, opt.template, node.get("href")))
			infile = open(os.path.join(template_dir, opt.template, node.get("href")), encoding="utf8")
			node.text = infile.read()
			infile.close()
			for key in list(node.attrib.keys()):
				del node.attrib[key]
			node.tag = "style"
Example 30
 def show_math(self, math):
     url_part = lzstring.LZString().compressToEncodedURIComponent(math)
     self.webview.load_uri(MATHPASTE_URL + '#fullmath:' + url_part)
     self.webview.reload()   # no idea why this is needed