Code example #1
File: importer_curl.py Project: namuan/http-rider
 def import_data(self, curl_command):
     try:
         ctx: ParsedContext = uncurl.parse_context(curl_command)
         api_call = self.__extract_api_call(ctx)
         return None, [api_call]
     except BaseException:
         raise SyntaxError("Unable to parse curl command")
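For orientation, a minimal standalone sketch of the ParsedContext namedtuple that uncurl.parse_context returns; the curl command here is invented for illustration:

import uncurl

# Invented curl command, used only to show the shape of the parsed result
ctx = uncurl.parse_context(
    "curl 'https://example.com/api' -H 'Accept: application/json' -d '{\"a\": 1}'"
)
print(ctx.method)   # e.g. "post", inferred from the presence of a body
print(ctx.url)      # "https://example.com/api"
print(ctx.headers)  # mapping of header names to values
print(ctx.cookies)  # mapping of cookie names to values
print(ctx.data)     # the raw request body string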
Code example #2
import json
from urllib.parse import urlparse

import uncurl
import xerox


def main():
    # Read a copied curl command from the clipboard
    curl_input = xerox.paste()
    print("Input: -----")
    print(curl_input)
    print("-----\n\n")

    context = uncurl.parse_context(curl_input)
    request_data = json.loads(context.data)
    url = urlparse(context.url)
    # get_query_dict is a project helper that turns the query string into a
    # dict; a possible implementation is sketched just after this example
    query_params = get_query_dict(url.query)
    cookie_string = ";".join(f"{key}={value}"
                             for key, value in context.cookies.items())

    # Build a Postman v2.1 collection around the parsed GraphQL request
    postman_collection = {
        "info": {
            "name": request_data["operationName"],
            "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
        },
        "item": [{
            "name": request_data["operationName"],
            "request": {
                "method": "POST",
                "header": [],
                "body": {
                    "mode": "graphql",
                    "graphql": {
                        "query": request_data["query"],
                        "variables": json.dumps(request_data["variables"]),
                    },
                },
                "url": {
                    "raw": context.url,
                    "protocol": url.scheme,
                    "host": [url.hostname],
                    "port": url.port,
                    "path": url.path.split("/"),
                    "query": [{"key": key, "value": value}
                              for key, value in query_params.items()],
                },
            },
            "response": [],
        }],
        "protocolProfileBehavior": {},
    }

    result = json.dumps(postman_collection)
    print("----- Postman Collection ----")
    print(result)
    print("---- Headers -----")
    for key, value in context.headers.items():
        print(f"{key}:{value}")
    print(f"Cookie:{cookie_string}")
    print("-----")
    # Put the generated collection back on the clipboard
    xerox.copy(result)
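The main() above relies on a get_query_dict helper that is not part of the excerpt; a plausible minimal implementation, assuming it simply turns the URL query string into a dict, could use urllib.parse.parse_qsl:

from urllib.parse import parse_qsl


def get_query_dict(query_string):
    # Hypothetical helper: turn "a=1&b=2" into {"a": "1", "b": "2"}
    return dict(parse_qsl(query_string))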
Code example #3
def extract_curl(cmd):
    if not cmd:
        return
    try:
        context = uncurl.parse_context(cmd)
    except SystemExit:
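        # uncurl's command-line parser exits on malformed input;
        # convert that into an ordinary exception for the caller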
        raise Exception(f"Failed parsing curl: {cmd}") from None
    else:
        return context.url, context.headers, context.cookies
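A quick usage sketch for extract_curl, with a made-up URL and header, assuming uncurl is importable in the same module:

url, headers, cookies = extract_curl(
    "curl 'https://example.com/status' -H 'Accept: application/json'"
)
print(url)      # https://example.com/status
print(headers)  # includes the Accept header
print(cookies)  # empty, since the command sends no Cookie header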
Code example #4
import json
from json import JSONDecodeError

import uncurl
import xerox


def main():
    curl_input = xerox.paste()
    context = uncurl.parse_context(curl_input)
    try:
        request_data = json.loads(context.data)
    except JSONDecodeError:
        # Not valid JSON: print the raw body with escaped newlines expanded and stop
        print(context.data.replace("\\n", "\n"))
        return
    print(request_data["operationName"])
    print("-----\n\n")
    print(request_data["query"])
    print("-----\n\n")
    print(json.dumps(request_data["variables"], indent=2))
Code example #5
 def from_curl(cls, df, curl_cmd, df_response, df_request_name, sema=30):
     """
     inputs: POSTMAN - curl
     """
     import uncurl
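     # Normalize flags (drop -L, rewrite --data-raw to -d) before handing the command to uncurl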
     curl_cmd = curl_cmd.replace(" -L", "")
     curl_cmd = curl_cmd.replace("--data-raw", "-d")
     context = uncurl.parse_context(curl_cmd)
     context_kwargs = context._asdict()
     return AsyncDf(df=df,
                    df_response=df_response,
                    df_request_name=df_request_name,
                    sema=sema,
                    **context_kwargs)
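As a rough illustration, ParsedContext._asdict() turns the parsed command into plain keyword arguments; judging from the other examples on this page, the fields include method, url, data, headers, cookies, verify and auth:

import uncurl

ctx = uncurl.parse_context("curl 'https://example.com/api' -H 'Accept: application/json'")
# These keys are what gets forwarded to AsyncDf via **context_kwargs
print(list(ctx._asdict().keys()))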
Code example #6
File: solaredge_api.py Project: quent/brick-battery
 def __init__(self, curl_filename):
     """
     curl_filename: text file containing the curl command used to retrieve
                    currentPowerFlow data
     """
     self.pv_generation = float('NaN')
     self.grid_import = float('NaN')
     # Used to check whether SolarEdge has stopped updating (server problem,
     # or no fresh data from the inverter): if polled every 3 seconds and
     # now - last_changed > 30 secs, the values are getting dubious.
     self.last_changed = None
     self._session = None
     with open(curl_filename) as curl_file:
         curl_command = curl_file.read()
         self._cpf_context = uncurl.parse_context(curl_command)
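The constructor only stores the parsed context; as a sketch of how such a context could drive an actual poll, here is the same idea with the requests library (illustrative only, not the project's own session handling):

import requests
import uncurl

ctx = uncurl.parse_context(
    "curl 'https://example.com/currentPowerFlow' -H 'Accept: application/json'"
)
# Replay the captured request with its original headers and cookies
response = requests.get(ctx.url, headers=ctx.headers, cookies=ctx.cookies)
print(response.status_code)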
Code example #7
File: helper.py Project: swavan/oneui
def curl_handler(command: str) -> RequestModal:
    try:
        # Strip backslash line continuations before handing the command to uncurl
        removed_unwanted = command.replace("\\", "")
        _request = parse_context(removed_unwanted)
        return RequestModal(method=_request.method,
                            url=_request.url,
                            data=_request.data or None,
                            headers=dict(_request.headers),
                            cookies=dict(_request.cookies),
                            verify=True,
                            auth=_request.auth)
    except Exception as err:
        SwaVanLogRecorder.send_log(f"Unable to parse the cURL {err}")
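RequestModal here is a project class that the excerpt does not define; a hypothetical stand-in with the fields the handler populates, in case you want to exercise curl_handler on its own:

from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class RequestModal:
    # Hypothetical stand-in for the project's model, limited to the
    # fields used by curl_handler above
    method: str
    url: str
    data: Optional[str]
    headers: dict
    cookies: dict
    verify: bool
    auth: Any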
Code example #8
  def processParityCheck(self):
    import uncurl
    try:
      with open(self.file_name, 'r') as file:
        data = file.read()
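        # Run the same curl command against both hosts and capture each response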
        curl_request_1 = data.replace(HOST_NAME, self.url_1).replace('\n', '')
        print("curl1: "+curl_request_1+"\n")
        self.url_context = uncurl.parse_context(curl_request_1)
        proc_1 = subprocess.Popen(curl_request_1, shell=True, stdout=subprocess.PIPE)
        (output_1, error_1) = proc_1.communicate()
        if error_1:
          print("error_1: " + str(error_1))
        
        curl_request_2 = data.replace(HOST_NAME, self.url_2).replace('\n', '')
        print("\ncurl2: "+curl_request_2+"\n")
        proc_2 = subprocess.Popen(curl_request_2, shell=True, stdout=subprocess.PIPE)
        (output_2, error_2) = proc_2.communicate()
        if error_2:
          print("error_2: " + str(error_2))
    except Exception as err:
      print("Exception encountered!")
      print(err)
      exit(1)

    #print("output_2:"+output_2+"\noutput_1:"+output_1)
    if output_2 and output_1:
      try:
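        # Compare the two JSON responses in both directions and record
        # any fields that do not match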
        jsonObject_a = json.loads(output_1)
        jsonObject_b = json.loads(output_2)
        #print("\njsonObject_a: " + json.dumps(jsonObject_a, indent=4))
        self.unmatched_tags[self.url_1] = {}
        self.compare_object(self.url_1, '', self.sortedDeep(jsonObject_a), self.sortedDeep(jsonObject_b))
        self.unmatched_tags[self.url_2] = {}
        self.compare_object(self.url_2, '', self.sortedDeep(jsonObject_b), self.sortedDeep(jsonObject_a))

        json_data = json.dumps(self.unmatched_tags, indent=4, sort_keys=True)
        #print("\nParity discrepancy in:\n" + json_data)
        
        result = self.combine_results(json.loads(json_data))
        #print("\nresult: " + json.dumps(result))
        
        self.publish_results(result)
      except Exception as err:
        print(err)
        traceback.print_exc()
Code example #9
File: 98tang.py Project: yelc66/aj66
def retrieve_cookies_from_curl(env: str) -> dict:
    cURL = os.getenv(env, '').replace('\\', '')
    return uncurl.parse_context(curl_command=cURL).cookies
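A possible way to use this helper; the environment variable name and the curl command are made up for illustration:

import os

os.environ['COOKIES_CURL'] = "curl 'https://example.com/' -H 'Cookie: session=abc123'"
print(retrieve_cookies_from_curl('COOKIES_CURL'))  # expected: a dict like {'session': 'abc123'}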
Code example #10
File: scraper.py Project: Ruborcalor/onecard_scraper
def get_user_transactions(username, password):
    session = requests.Session()

    def parsed_context_to_dict(parsed_context):
        res = {}
        for key in ["url", "method", "headers", "data", "cookies", "auth"]:
            res[key] = getattr(parsed_context, key)
        return res

    # GET LOGIN PAGE --------------------------------------------------------------------------------------------------
    response = session.request(**parsed_context_to_dict(
        uncurl.parse_context('''curl 'https://onecard.mcgill.ca/LogIn.aspx' 

        -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' 
        -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 
        -H 'Accept-Language: en-US,en;q=0.5' --compressed 
        -H 'DNT: 1' 
        -H 'Connection: keep-alive' 
        -H 'Upgrade-Insecure-Requests: 1' 
        -H 'Cache-Control: max-age=0' ''')))

    # get body parameters
    soup = BeautifulSoup(response.text, "html.parser")

    viewstate = urllib.parse.quote(soup.find(id="__VIEWSTATE")['value'],
                                   safe='')
    generator = urllib.parse.quote(
        soup.find(id="__VIEWSTATEGENERATOR")['value'], safe='')
    validation = urllib.parse.quote(soup.find(id="__EVENTVALIDATION")['value'],
                                    safe='')

    # LOGIN WITH BODY PARAMETERS --------------------------------------------------------------------------------------
    response = session.request(**parsed_context_to_dict(
        uncurl.parse_context('''curl 'https://onecard.mcgill.ca/LogIn.aspx' 
           -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' 
           -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 
           -H 'Accept-Language: en-US,en;q=0.5' --compressed 
           -H 'Content-Type: application/x-www-form-urlencoded' 
           -H 'DNT: 1' 
           -H 'Connection: keep-alive' 
           -H 'Referer: https://onecard.mcgill.ca/LogIn.aspx' 
           -H 'Upgrade-Insecure-Requests: 1' --data '__VIEWSTATE=''' +
                             viewstate + '''&__VIEWSTATEGENERATOR=''' +
                             generator + '''&__EVENTVALIDATION=''' +
                             validation + '''&tbUserName=''' + username +
                             '''&tbPassword=''' + password +
                             '''&Button1=Log+In' ''')))

    # GET FIRST TRANSACTIONS PAGE -------------------------------------------------------------------------------------
    response = session.request(**parsed_context_to_dict(
        uncurl.parse_context(
            '''curl 'https://onecard.mcgill.ca/Consumption.aspx' 
           -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' 
           -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' 
           -H 'Accept-Language: en-US,en;q=0.5' --compressed 
           -H 'DNT: 1' 
           -H 'Connection: keep-alive' 
           -H 'Referer: https://onecard.mcgill.ca/Default.aspx' 
           -H 'Upgrade-Insecure-Requests: 1' ''')))

    # get body parameters
    soup = BeautifulSoup(response.text, "html.parser")

    viewstate = urllib.parse.quote(soup.find(id="__VIEWSTATE")['value'],
                                   safe='')
    generator = urllib.parse.quote(
        soup.find(id="__VIEWSTATEGENERATOR")['value'], safe='')
    validation = urllib.parse.quote(soup.find(id="__EVENTVALIDATION")['value'],
                                    safe='')

    account_balances = []
    account_table = soup.find(id="cphConsumption_gvAccounts")
    for i, table_row in enumerate(account_table.findAll('tr')[1:]):
        account_balances.append(
            [column.text for column in table_row.findAll('td')])

    account_balances = list(
        map(lambda row: {
            "account_name": row[0],
            "balance": row[1]
        }, account_balances))

    output_rows = []
    # collect the transaction rows from the details table
    table = soup.find(id="cphConsumption_gvTransDetail")
    for i, table_row in enumerate(table.findAll('tr')):
        if 2 < i < (len(table.findAll('tr')) - 2):
            output_rows.append(
                [column.text for column in table_row.findAll('td')])

    # check if there is next page
    hasnext = ("Next" in response.text)

    while hasnext:

        response = session.request(**parsed_context_to_dict(
            uncurl.parse_context(
                '''curl 'https://onecard.mcgill.ca/Consumption.aspx' 
            -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' -H 'Accept: */*' 
            -H 'Accept-Language: en-US,en;q=0.5' --compressed -H 'X-Requested-With: XMLHttpRequest' 
            -H 'X-MicrosoftAjax: Delta=true' -H 'Cache-Control: no-cache' 
            -H 'Content-Type: application/x-www-form-urlencoded; charset=utf-8' -H 'DNT: 1' 
            -H 'Connection: keep-alive' -H 'Referer: https://onecard.mcgill.ca/Consumption.aspx' --data 'ctl00%24cphConsumption%24ScriptManager1=ctl00%24cphConsumption%24upDetails%7Cctl00%24cphConsumption%24gvTransDetail&__EVENTTARGET=ctl00%24cphConsumption%24gvTransDetail&__EVENTARGUMENT=Page%24Next&__VIEWSTATE='''
                + viewstate + '''&__VIEWSTATEGENERATOR=''' + generator +
                '''&__EVENTVALIDATION=''' + validation +
                '''&__ASYNCPOST=true&' ''')))

        soup = BeautifulSoup(response.text, "html.parser")
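        # The ASP.NET AJAX delta response is pipe-delimited; pull the fresh
        # __VIEWSTATE, generator and validation tokens out of it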
        starting = (soup.text[soup.text.index("__VIEWSTATE") + 12:])
        broken_up = (starting.split("|"))
        viewstate = urllib.parse.quote(broken_up[0], safe='')
        generator = urllib.parse.quote(broken_up[4], safe='')
        validation = urllib.parse.quote(broken_up[8], safe='')

        table = soup.find(id="cphConsumption_gvTransDetail")
        for i, table_row in enumerate(table.findAll('tr')):
            if 2 < i < (len(table.findAll('tr')) - 2):
                output_rows.append(
                    [column.text for column in table_row.findAll('td')])

        hasnext = ("Next" in response.text)

    df = pd.DataFrame(output_rows,
                      columns=[
                          "transaction_id", "datetime", "amount", "location",
                          "device", "account"
                      ])

    # reverse the transactions
    df = df.reindex(index=df.index[::-1])

    # convert Date and Time column to datetime objects
    df["datetime"] = pd.to_datetime(df["datetime"])

    # set the index to the Date and Time column
    df = df.set_index("datetime")

    # convert the amount column to float
    df["amount"] = df["amount"].replace(["\\$", "\\(", "\\)"], '',
                                        regex=True).astype(float)

    return df, account_balances
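A minimal call, with placeholder credentials, just to show the return shape:

df, account_balances = get_user_transactions("my_username", "my_password")
print(account_balances)   # list of {"account_name": ..., "balance": ...} dicts
print(df.head())          # transactions indexed by datetime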