Code example #1
def test_parse_curl_with_request_kargs():

    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch'",
        timeout=0.1,
        allow_redirects=True).should.equal(
            """requests.get("https://pypi.python.org/pypi/uncurl",
    allow_redirects=True,
    timeout=0.1,
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={},
    auth=(),
)""")

    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch'",
        timeout=0.1).should.equal(
            """requests.get("https://pypi.python.org/pypi/uncurl",
    timeout=0.1,
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={},
    auth=(),
)""")
Code example #2
File: test_api.py Project: spulec/uncurl
def test_basic_get():
    uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl'").should.equal(
        """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={},
    cookies={},
)"""
    )
Code example #3
def test_basic_get():
    uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl'").should.equal(
        """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={},
    cookies={},
    auth=(),
)""")
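
These tests rely on the sure library's fluent .should.equal assertion. A plain-assert sketch of the same check, assuming uncurl is installed (the exact fields in the generated code, such as auth=(), depend on the uncurl version):

import uncurl

expected = """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={},
    cookies={},
    auth=(),
)"""
# parse() returns the generated requests call as a string
assert uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl'") == expected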
Code example #4
File: test_api.py Project: tomjwalsh/uncurl
def test_parse_curl_with_another_binary_data():
    uncurl.parse(
        """curl -H 'PID: 20000079' -H 'MT: 4' -H 'DivideVersion: 1.0' -H 'SupPhone: Redmi Note 3' -H 'SupFirm: 5.0.2' -H 'IMEI: wx_app' -H 'IMSI: wx_app' -H 'SessionId: ' -H 'CUID: wx_app' -H 'ProtocolVersion: 1.0' -H 'Sign: 7876480679c3cfe9ec0f82da290f0e0e' -H 'Accept: /' -H 'BodyEncryptType: 0' -H 'User-Agent: Mozilla/5.0 (Linux; Android 6.0.1; OPPO R9s Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 hap/1.0/oppo com.nearme.instant.platform/2.1.0beta1 com.felink.quickapp.reader/1.0.3 ({"packageName":"com.oppo.market","type":"other","extra":{}})' -H 'Content-Type: text/plain; charset=utf-8' -H 'Host: pandahomeios.ifjing.com' --data-binary '{"CateID":"508","PageIndex":1,"PageSize":30}' --compressed 'http://pandahomeios.ifjing.com/action.ashx/otheraction/9028'"""
    ).should.equals(
        r"""requests.post("http://pandahomeios.ifjing.com/action.ashx/otheraction/9028",
    data='{"CateID":"508","PageIndex":1,"PageSize":30}',
    headers={
        "Accept": "/",
        "BodyEncryptType": "0",
        "CUID": "wx_app",
        "Content-Type": "text/plain; charset=utf-8",
        "DivideVersion": "1.0",
        "Host": "pandahomeios.ifjing.com",
        "IMEI": "wx_app",
        "IMSI": "wx_app",
        "MT": "4",
        "PID": "20000079",
        "ProtocolVersion": "1.0",
        "SessionId": "",
        "Sign": "7876480679c3cfe9ec0f82da290f0e0e",
        "SupFirm": "5.0.2",
        "SupPhone": "Redmi Note 3",
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0.1; OPPO R9s Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 hap/1.0/oppo com.nearme.instant.platform/2.1.0beta1 com.felink.quickapp.reader/1.0.3 ({\"packageName\":\"com.oppo.market\",\"type\":\"other\",\"extra\":{}})"
    },
    cookies={},
)""")
Code example #5
File: test_api.py Project: tomjwalsh/uncurl
def test_parse_curl_with_insecure_flag():
    uncurl.parse("""curl 'https://pypi.python.org/pypi/uncurl' --insecure"""
                 ).should.equal(
                     """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={},
    cookies={},
    verify=False
)""")
Code example #6
File: main.py Project: vladiibine/bots
	def __init__(self, options_request_str, post_request_str):
		"""Holds data regarding the `get_league` requests made. Strips out newline characters

		:param str options_request_str: the curl string for the OPTIONS http request
		:param str post_request_str: the curl string for the second http request, a POST
		"""
		self.options_request = uncurl.parse(options_request_str.replace('\n', ''))
		self.post_request = uncurl.parse(post_request_str.replace('\n', ''))
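
A minimal usage sketch for this initializer, assuming it belongs to a class hypothetically named LeagueRequests here and that the parsed strings are later executed with eval (the URLs and curl strings are placeholders, not the project's real ones):

import uncurl
import requests  # the generated code calls requests.*, so it must be importable when eval'd

class LeagueRequests:  # hypothetical name for the class this __init__ belongs to
    def __init__(self, options_request_str, post_request_str):
        self.options_request = uncurl.parse(options_request_str.replace('\n', ''))
        self.post_request = uncurl.parse(post_request_str.replace('\n', ''))

reqs = LeagueRequests(
    "curl 'https://example.com/api/get_league'",                       # hypothetical OPTIONS curl string
    "curl 'https://example.com/api/get_league' --data 'league_id=1'",  # hypothetical POST curl string
)
response = eval(reqs.post_request)  # executes the generated requests.post(...) call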
Code example #7
File: test_api.py Project: spulec/uncurl
def test_post_with_string_data():
    uncurl.parse("""curl 'https://pypi.python.org/pypi/uncurl' --data 'this is just some data'""").should.equal(
        """requests.post("https://pypi.python.org/pypi/uncurl",
    data='this is just some data',
    headers={},
    cookies={},
)"""
    )
Code example #8
File: test_api.py Project: tomjwalsh/uncurl
def test_post_with_string_data():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' --data 'this is just some data'"""
    ).should.equal("""requests.post("https://pypi.python.org/pypi/uncurl",
    data='this is just some data',
    headers={},
    cookies={},
)""")
Code example #9
File: test_api.py Project: spulec/uncurl
def test_colon_header():
    uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl' -H ':authority:mobile.twitter.com'").should.equal(
        """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        ":authority": "mobile.twitter.com"
    },
    cookies={},
)"""
    )
Code example #10
File: test_api.py Project: tomjwalsh/uncurl
def test_colon_header():
    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H ':authority:mobile.twitter.com'"
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        ":authority": "mobile.twitter.com"
    },
    cookies={},
)""")
Code example #11
def test_parse_curl_with_raw_data():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' --data-raw 'this is just some data'"""
    ).should.equal("""requests.post("https://pypi.python.org/pypi/uncurl",
    data='this is just some data',
    headers={},
    cookies={},
    auth=(),
)""")
Code example #12
def test_parse_curl_escaped_unicode_in_cookie():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' -H $'cookie: sid=00Dt00000004XYz\\u0021ARg' """
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={},
    cookies={
        "sid": "00Dt00000004XYz!ARg"
    },
    auth=(),
)""")
Code example #13
File: test_api.py Project: tomjwalsh/uncurl
def test_basic_headers():
    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Accept-Language: en-US,en;q=0.8'"
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate,sdch",
        "Accept-Language": "en-US,en;q=0.8"
    },
    cookies={},
)""")
Code example #14
File: test_api.py Project: spulec/uncurl
def test_basic_headers():
    uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Accept-Language: en-US,en;q=0.8'").should.equal(
        """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate,sdch",
        "Accept-Language": "en-US,en;q=0.8"
    },
    cookies={},
)"""
    )
Code example #15
def test_parse_curl_with_escaped_newlines():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' \\\n -H 'Accept-Encoding: gzip,deflate' \\\n --insecure"""
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate"
    },
    cookies={},
    auth=(),
    verify=False
)""")
Code example #16
def test_cookies_dollar_sign():
    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch' -H $'Cookie: somereallyreallylongcookie=true'"
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "somereallyreallylongcookie": "true"
    },
    auth=(),
)""")
Code example #17
File: test_api.py Project: tomjwalsh/uncurl
def test_cookies_lowercase():
    uncurl.parse(
        "curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'cookie: foo=bar; baz=baz2'"
    ).should.equal("""requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)""")
Code example #18
File: test_api.py Project: spulec/uncurl
def test_cookies_lowercase():
    uncurl.parse("curl 'https://pypi.python.org/pypi/uncurl' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'cookie: foo=bar; baz=baz2'").should.equal(
        """requests.get("https://pypi.python.org/pypi/uncurl",
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)"""
    )
Code example #19
File: test_api.py Project: spulec/uncurl
def test_post_with_dict_data():
    uncurl.parse("""curl 'https://pypi.python.org/pypi/uncurl' --data '{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"}}' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Cookie: foo=bar; baz=baz2'""").should.equal(
        """requests.post("https://pypi.python.org/pypi/uncurl",
    data='{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"}}',
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)"""
    )
Code example #20
File: test_api.py Project: tomjwalsh/uncurl
def test_post():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' --data '[{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"},"now":1396219192277,"ab":{"welcome_email":{"v":"2","g":2}}}]' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Cookie: foo=bar; baz=baz2'"""
    ).should.equal("""requests.post("https://pypi.python.org/pypi/uncurl",
    data='[{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"},"now":1396219192277,"ab":{"welcome_email":{"v":"2","g":2}}}]',
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)""")
Code example #21
File: test_api.py Project: spulec/uncurl
def test_post():
    uncurl.parse("""curl 'https://pypi.python.org/pypi/uncurl' --data '[{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"},"now":1396219192277,"ab":{"welcome_email":{"v":"2","g":2}}}]' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Cookie: foo=bar; baz=baz2'""").should.equal(
        """requests.post("https://pypi.python.org/pypi/uncurl",
    data='[{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"},"now":1396219192277,"ab":{"welcome_email":{"v":"2","g":2}}}]',
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)"""
    )
Code example #22
File: test_api.py Project: tomjwalsh/uncurl
def test_post_with_dict_data():
    uncurl.parse(
        """curl 'https://pypi.python.org/pypi/uncurl' --data '{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"}}' -H 'Accept-Encoding: gzip,deflate,sdch' -H 'Cookie: foo=bar; baz=baz2'"""
    ).should.equal("""requests.post("https://pypi.python.org/pypi/uncurl",
    data='{"evt":"newsletter.show","properties":{"newsletter_type":"userprofile"}}',
    headers={
        "Accept-Encoding": "gzip,deflate,sdch"
    },
    cookies={
        "baz": "baz2",
        "foo": "bar"
    },
)""")
Code example #23
File: __init__.py Project: dvm-shlee/ratconnectome
def parse_request_cmd(curl_cmd):
    request_cmd = uncurl.parse(curl_cmd)
    request_text = "".join(l.strip() for l in request_cmd.split('\n'))
    p_url = re.compile(r'requests.post\(((?!,).)*', )
    p_headers = re.compile(r'headers={((?!{).)*}')
    output = dict()

    u_start, u_end = p_url.search(request_text).span()
    h_start, h_end = p_headers.search(request_text).span()

    output["url"] = request_text[u_start + 15:u_end - 1]
    exec(request_text[h_start:h_end], output)
    return output['headers'], output['url']
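
A hedged usage sketch of parse_request_cmd (the URL and header below are placeholder values); note that the regex only matches requests.post(...), so the curl command must include a request body:

headers, url = parse_request_cmd(
    "curl 'https://example.com/api' -H 'Content-Type: application/json' --data '{}'"
)
print(url)      # https://example.com/api
print(headers)  # {'Content-Type': 'application/json'}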
Code example #24
File: app.py Project: weslambert/Shuffle-apps
    async def curl(self, statement):
        try: 
            if not statement.startswith("curl "):
                statement = "curl %s" % statement

            data = uncurl.parse(statement)
            request = eval(data)
            if isinstance(request, requests.models.Response):
                return request.text
            else:
                return "Unable to parse the curl parameter. Remember to start with curl "
        except Exception:
            return "An error occurred during curl parsing"
Code example #25
def print_parse():
	curl = """
	curl -i -s -k  -X $'GET' \
		-H $'Host: hacking.com:8080' -H $'Upgrade-Insecure-Requests: 1' -H $'User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36' -H $'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' -H $'Referer: http://hacking.com:8080/dvwa/vulnerabilities/xss_d/?default=French' -H $'Accept-Encoding: gzip, deflate' -H $'Accept-Language: en-US,en;q=0.9,id;q=0.8,ms;q=0.7,es;q=0.6,ru;q=0.5,pl;q=0.4,vi;q=0.3' -H $'Cookie: security=impossible; PHPSESSID=am2u82k8v9sh1gi6rf9hak5mkn' -H $'Connection: close' \
		-b $'security=impossible; PHPSESSID=am2u82k8v9sh1gi6rf9hak5mkn' \
		$'http://hacking.com:8080/dvwa/vulnerabilities/xss_d/?default=German'

	"""
	# Strip the extra curl flags before parsing
	curl = curl.replace('-i -s -k  -X $\'GET\' ', '')
	curl = curl.replace('-i -s -k  -X $\'POST\' ', '')

	# Parse, then drop the leading $ that shlex leaves from the $'...' quoted arguments
	context = uncurl.parse(curl)
	context = context.replace('"$', '"')
	print(context)
Code example #26
File: test_api.py Project: spulec/uncurl
def test_parse_curl_with_another_binary_data():
    uncurl.parse("""curl -H 'PID: 20000079' -H 'MT: 4' -H 'DivideVersion: 1.0' -H 'SupPhone: Redmi Note 3' -H 'SupFirm: 5.0.2' -H 'IMEI: wx_app' -H 'IMSI: wx_app' -H 'SessionId: ' -H 'CUID: wx_app' -H 'ProtocolVersion: 1.0' -H 'Sign: 7876480679c3cfe9ec0f82da290f0e0e' -H 'Accept: /' -H 'BodyEncryptType: 0' -H 'User-Agent: Mozilla/5.0 (Linux; Android 6.0.1; OPPO R9s Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 hap/1.0/oppo com.nearme.instant.platform/2.1.0beta1 com.felink.quickapp.reader/1.0.3 ({"packageName":"com.oppo.market","type":"other","extra":{}})' -H 'Content-Type: text/plain; charset=utf-8' -H 'Host: pandahomeios.ifjing.com' --data-binary '{"CateID":"508","PageIndex":1,"PageSize":30}' --compressed 'http://pandahomeios.ifjing.com/action.ashx/otheraction/9028'""").should.equals(
        r"""requests.post("http://pandahomeios.ifjing.com/action.ashx/otheraction/9028",
    data='{"CateID":"508","PageIndex":1,"PageSize":30}',
    headers={
        "Accept": "/",
        "BodyEncryptType": "0",
        "CUID": "wx_app",
        "Content-Type": "text/plain; charset=utf-8",
        "DivideVersion": "1.0",
        "Host": "pandahomeios.ifjing.com",
        "IMEI": "wx_app",
        "IMSI": "wx_app",
        "MT": "4",
        "PID": "20000079",
        "ProtocolVersion": "1.0",
        "SessionId": "",
        "Sign": "7876480679c3cfe9ec0f82da290f0e0e",
        "SupFirm": "5.0.2",
        "SupPhone": "Redmi Note 3",
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0.1; OPPO R9s Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 hap/1.0/oppo com.nearme.instant.platform/2.1.0beta1 com.felink.quickapp.reader/1.0.3 ({\"packageName\":\"com.oppo.market\",\"type\":\"other\",\"extra\":{}})"
    },
    cookies={},
)""")
Code example #27
File: api.py Project: Zenulous/curl_modifier
def execute_requests_with_file_substrings(curl_request, substring,
                                          full_path_to_file):
    import uncurl
    converted_request = uncurl.parse(curl_request)
    file = open(full_path_to_file)
    line_count = 0
    print('Job initiated')
    for line in file:
        sanitised_line = line.strip('\n').replace('\'', '\"')
        uncurled_request = replace_curl_substring(converted_request, substring,
                                                  sanitised_line)
        execute_uncurled_request(uncurled_request)
        print('Request with vector {} which replaced substring {} successful'.
              format(sanitised_line, substring))
        line_count += 1
    print('Job finished, {} requests completed'.format(line_count))
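
The helpers replace_curl_substring and execute_uncurled_request are not shown in this file; a plausible sketch of them (an assumption, not the project's actual code) is:

import requests  # the eval'd uncurl output calls requests.*

def replace_curl_substring(converted_request, substring, replacement):
    # swap a placeholder substring inside the generated requests code for the current vector
    return converted_request.replace(substring, replacement)

def execute_uncurled_request(uncurled_request):
    # uncurl.parse returns Python source for a requests call; eval executes it
    return eval(uncurled_request)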
Code example #28
File: api.py Project: Zenulous/curl_modifier
def execute_repeated_request(curl_request, n, parallel=True):
    import uncurl
    import multiprocessing
    uncurled_request = uncurl.parse(curl_request)
    core_count = multiprocessing.cpu_count()
    if not parallel or core_count == 1:
        print('Serial job initiated')
        for request in range(n):
            execute_uncurled_request(uncurled_request)
        print('Job finished, {} requests completed'.format(n))
        return
    print('Parallel job initiated')
    p = multiprocessing.Pool(core_count)
    requests = [uncurled_request] * n
    job = p.map(execute_uncurled_request, requests)
    p.close()
    p.join()
    print('Job finished, {} requests completed'.format(len(job)))
Code example #29
        pos_end = len(txtconfig)

except Exception as e:
    print('Failed to get the request parameters! ' + requestname)
    print(e.args)

try:
    # Grab only the curl text
    txtcurl = txtconfig[pos_ini:pos_end]

    # Strip unwanted characters
    txtcurl = txtcurl.replace('\n', '')
    txtcurl = txtcurl.replace(' \\ ', '')

    # Convert the curl command into a requests call
    response = None
    txtrequest = uncurl.parse(txtcurl)
    txtrequest = 'response = ' + txtrequest

except Exception as e:
    print('Failed to prepare the data for the request! ' + requestname)
    print(e.args)

try:
    # Initialize the variables used to make the request
    status_code = None
    tempodecorrido = 0.000
Code example #30
File: main.py Project: Dorokah/sandbox
async def curl2python(curl_request: str = Form(...)):
    app_logger.info(f"curl: {curl_request}")
    python_code = uncurl.parse(curl_request)
    app_logger.info(f"requests: {python_code}")
    return python_code
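
Assuming this coroutine is registered as a POST endpoint that accepts form data (the route decorator is not shown; the path and host below are hypothetical), a client call might look like:

import requests

resp = requests.post(
    "http://localhost:8000/curl2python",  # hypothetical route for this endpoint
    data={"curl_request": "curl 'https://example.com' -H 'Accept: application/json'"},
)
print(resp.text)  # the generated requests code (returned as a JSON-encoded string)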
Code example #31
File: uncurl.py Project: c6401/snippets
#!python3 -mpip install --user -U uncurl
from uncurl import parse

print(parse('''

'''))
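
The empty triple-quoted string is a placeholder to paste a copied curl command into; filled in with a hypothetical command it would read:

print(parse('''
curl 'https://example.com/api' -H 'Accept: application/json'
'''))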
Code example #32
File: daily_activity.py Project: lyneca/fb-grapher
print("1. Go to messenger.com and to the conversation you want to graph.")
print("2. Press F12 and go to the network tab.")
print("3. Scroll up in the conversation so that you load more messages.")
print("4. You should see a POST request to /api/graphqlbatch/. Right click on this request, and click \"Copy as cURL\".")
curl = input("Paste the cURL request you copied here: ")

your_name = input("What is your name? ")
their_name = input("What is their name? ")

print("Tidying cURL request...")
curl = re.sub(r"%2C%22before%22%3A\d+%7D", "%7D", curl)
curl = re.sub(r"message_limit%22%3A\d+%2C", "message_limit%22%3A10000000%2C", curl)
curl = re.sub(r" --2.0", "", curl)

print("Getting data...")
r = eval(uncurl.parse(curl))

print("Decoding and extracting...")
x = r.content.decode('U8')
j = json.loads(x[:x.index('{\n   ') - 1])
other_id = j['o0']['data']['message_thread']['thread_key']['other_user_id']
j = j['o0']['data']['message_thread']
messages = j['messages']['nodes']
#  date = datetime(2017, 4, 22)
#  out = open('out.txt', 'x')
#  for message in messages:
    #  if datetime.fromtimestamp(int(message['timestamp_precise'])/1000).date() == date.date():
        #  if 'message' in message:
            #  out.write(message['message']['text'] + '\n')
#  out.close()
Code example #33
File: export.py Project: ritiek/mitmproxy
def py_requests(f: flow.Flow) -> str:
    if not hasattr(f, "request"):
        raise exceptions.CommandError("Can't export flow with no request.")
    data = uncurl.parse(curl_command(f))
    return ("import requests\n\n" + data)
Code example #34
def main():

    #    response = HtmlResponse(url='https://videos.raywenderlich.com/courses/90-programming-in-swift/lessons/1', **kwargs)
    cmd_curl = uncurl.parse(str_curl)
    response = eval(cmd_curl)
    fmt_curl = cmd_curl.replace(response.url, "%s")
    body = response.text
    tables = Selector(text=body).xpath(
        '//ul[@class="lesson-table"]')  #.extract()
    host = "https://" + get_host(response.url)

    title = Selector(
        text=body).xpath('//h2[@class="course-title"]/text()').extract_first()

    try:
        if not os.path.exists(title):
            os.makedirs(title)
        else:
            print('Already downloaded! Quitting.')
            exit(0)
    except OSError:
        print('FATAL: Error creating directory. ' + title)
        exit(-1)

    fi = open('%s/info.txt' % title, "w")
    f = open('%s/videos.txt' % title, "w")

    # http://masnun.com/2016/09/18/python-using-the-requests-module-to-download-large-files-efficiently.html
    def download_file(url, filename):
        response = requests.get(url, stream=True)
        handle = open(title + '/' + filename, "wb")
        for chunk in response.iter_content(chunk_size=512):
            if chunk:  # filter out keep-alive new chunks
                handle.write(chunk)

    def grab(part, number, curl, time, name):
        r = eval(curl)
        body = r.text
        with open('%s/%s-%s %s.htm' % (title, part, number, name), 'w') as src:
            src.write(body)
        fi.write(r.url + '\n')
        materials = Selector(
            text=body).xpath('//a[@class="download-materials"]')
        if materials:
            materials_filename = materials.xpath('@download').extract_first()
            materials_url = host + materials.xpath('@href').extract_first()
            fi.write(materials_url + ' ' + materials_filename + '\n')
            #        materials = li.xpath('//section[@id="video-info"]')
            #        print(materials.extract(), materials_filename, materials_url)
            download_file(materials_url,
                          "%s-%s %s" % (part, number, materials_filename))
        vimeo_id = Selector(text=body).xpath(
            '//div[@id="vimeo-player"]/@data-vimeo-id').extract_first()
        #        download_vimeo(vimeo_id)
        vimeo_url = vimeo_pfx + vimeo_id
        fi.write(vimeo_url + '\n\n')
        f.write(vimeo_url + '\n')

    for part, table in enumerate(tables, 1):
        print("Part %d" % part)
        for li in table.xpath('li'):
            number = li.xpath(
                'span[@class="lesson-number"]/text()').extract_first()
            url = response.url
            name = li.xpath(
                'span[@class="lesson-name"]/a/text()').extract_first()
            if not name:  # Trick. Contains class 'active'
                name = li.xpath(
                    'span[@class="lesson-name"]/text()').extract_first()
            else:
                url = host + li.xpath(
                    'span[@class="lesson-name"]/a/@href').extract_first()
            time = li.xpath(
                'span[@class="lesson-time"]/text()').extract_first()
            print(number, url, name, time)
            fi.write(str(part) + '-' + number + ' ' + name + '\n')
            grab(part, number, fmt_curl % url, time, name)

    f.close()
    fi.close()

    # Launch youtube-dl
    print('\nRun the following command to download videos:')
    print(
        'cd "%s" && youtube-dl -a videos.txt --referer https://videos.raywenderlich.com/ --all-subs -o "%%(autonumber)s %%(id)s %%(title)s.%%(ext)s" --external-downloader aria2c --external-downloader-args "-c -j 3 -x 3 -s 3 -k 1M" && for f in *.vtt; do ffmpeg -i $f $f.srt; done'
        % title)
Code example #35
  -H 'Cache-Control: max-age=0' \
  -H 'sec-ch-ua: "Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"' \
  -H 'sec-ch-ua-mobile: ?0' \
  -H 'Upgrade-Insecure-Requests: 1' \
  -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36' \
  -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9' \
  -H 'Sec-Fetch-Site: none' \
  -H 'Sec-Fetch-Mode: navigate' \
  -H 'Sec-Fetch-User: ?1' \
  -H 'Sec-Fetch-Dest: document' \
  -H 'Accept-Language: zh-CN,zh;q=0.9' \
  -H 'Cookie: read_mode=day; default_font=font2; locale=zh-CN; Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1616237295; __yadk_uid=ynf9cBVSMNLLsCZzCeKyg7tsQHodqm8B; web_login_version=MTYxNjIzNzMyOA%3D%3D--d359cc29a88014cd936a9af99bd35db45a669991; _ga=GA1.2.1476924542.1616237344; remember_user_token=W1sxMjI0MTIyNl0sIiQyYSQxMSRZNk1ESFBXbHNqYlhVSjEuTjM2bWcuIiwiMTYxNjQyOTk2MC45NzI0NTgxIl0%3D--f2fad88d4e055ce210350d8082be86b075ddcf75; _m7e_session_core=d100c914638dc090d837d9b63f072033; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221784f3ff75853c-0c274aca237e5-5771031-1327104-1784f3ff7599a3%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%221784f3ff75853c-0c274aca237e5-5771031-1327104-1784f3ff7599a3%22%7D; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1616429971' \
  -H 'If-None-Match: W/"f44091782b9faf76ebeaca98cfd8b7b7"' \
  --compressed"""

result = uncurl.parse(cmd)
print(result)
"""
result:
requests.get("https://www.jianshu.com/u/66ffe8731054",
    headers={
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "If-None-Match": "W/\"f44091782b9faf76ebeaca98cfd8b7b7\"",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "******",
        "Upgrade-Insecure-Requests": "1",