Example #1
def main(argv=None):
    """登陆界面,先获取本地ip,然后登陆,并显示流量时长等信息
        输入:空
        输出:空
    """
    #检查输入
    if argv is None:
        userid = default_userid
        passwd = default_passwd
    elif len(argv) == 1:
        userid = argv[0]
        passwd = default_passwd
    elif len(argv) == 2:
        userid = argv[0]
        passwd = argv[1]
    else:
        print("Invalid arguments, please check!")
        return

    # Build the POST payload
    post_data['DDDDD'] = userid
    post_data['upass'] = passwd
    post_data['0MKKey'] = ''

    # Check whether the connection succeeds; if so, extract and display the local IP from the response
    response = functions.requests_get(LOGIN_ADDR, user_timeout)
    title, script_text = functions.get_title(response)

    # A title of '欢迎登录北邮校园网络' means the connection succeeded and this is the login page
    if title == '欢迎登录北邮校园网络':
        functions.disp_local_ip(script_text)
        # Connection OK, perform the login
        response = functions.requests_post(LOGIN_ADDR, post_data)
        # Get the title of the login response
        title, script_text = functions.get_title(response)
        if title == '登录成功窗':
            print("登陆成功")
            #显示信息
            response = functions.requests_get(LOGIN_ADDR, user_timeout)
            title, script_text = functions.get_title(response)
            functions.display_info(script_text)
        elif title == '信息返回窗':
            functions.login_error(script_text)
        else:
            print("未知登陆错误,请检查并重试")
    # A title of "上网注销窗" means we are already logged in
    elif title == "上网注销窗":
        print("Already logged in")
    else:
        print("Connection problem, please retry")
Example #2
def search():
    URL = "https://www.amazon.in/"
    driver = webdriver.Chrome('chromedriver')
    driver.implicitly_wait(10)
    driver.get(URL)
    #Using selenium to drive the browser
    search_button = driver.find_element_by_id("twotabsearchtextbox")
    search_term = str(input("What do you want?\n:"))
    search_button.send_keys(search_term)
    search_button.send_keys(Keys.ENTER)
        
    soup = bs4.BeautifulSoup(driver.page_source, "html5lib")

    links = soup.find_all('a', attrs={"class":"a-link-normal s-no-outline"})[:10]
    links_storage = []
    for link in links:
        links_storage.append(link.get('href'))

    for link in links_storage:
        url = URL + link
        driver.get(url)
        new_soup = bs4.BeautifulSoup(driver.page_source, "html5lib")
        
        print()
        print("Name= ", get_title(new_soup))
        print("Mrp= ", get_price_mrp(new_soup))
        print("You pay= ", get_price_deal_price(new_soup))
        print("Rating= ", get_rating(new_soup))
        print("Reviews= ", get_review(new_soup))
        print("Availability= ", get_availability(new_soup))
        print("|")
        print("Link for the product= ", url)
        print("...................................next")
Example #3
def main():

    try:

        functions.read_file(sys.argv[1])
        functions.get_app_num_pub_filing_date()

        if data.values[0] != "" and data.values[1] != "":  # to check if the page is a patent or not
            functions.get_title()
            functions.get_abstract()
            functions.get_name_of_applicant()
            functions.get_name_of_inventor()
            functions.get_int_class_priorty_doc_num()
            functions.get_int_app_pub_num()
            functions.print_data()
        else:
            print "Not a patent"

    except Exception:
        print("Error in extracting")
Example #4
def main():
    """ 注销登陆,返回注销是否成功,并显示流量时长等信息
        输入为空或用户名、密码
        返回为空
    """
    response = functions.requests_get(LOGOUT_ADDR, user_timeout)
    title, script_text = functions.get_title(response)
    match = re.search(r'Msg=(\d\d);', script_text)
    return_msg = match.group(1) if match else ''
    if title == "信息返回窗" and return_msg == '14':
        print("Logout successful")
        functions.display_info(script_text)
    else:
        print("Logout failed")
Example #5
def inject_default_data():
    data = {
        "locale": LOCALE,
        "locales": [LOCALE],
        "dark": True,
        "show_search": True,
        "request": request,
        "main_title": SITE_NAME,
        "title": get_title(request.path)
    }
    if is_authenticated():
        data["email_hash"] = hashlib.md5(session["email"].encode()).hexdigest()
        data["name"] = session["name"]
        data["role"] = session["role"]
    return dict(data)
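
This function reads like a Flask context processor that feeds shared variables to every template. A minimal sketch of how it is typically registered; the app object and the registration style are assumptions, not necessarily how the original project wires it up.

from flask import Flask

app = Flask(__name__)

# Register the function above so its dict is merged into the context of
# every render_template() call.
app.context_processor(inject_default_data)

Templates can then refer to {{ main_title }}, {{ title }}, {{ dark }} and so on without each view passing them explicitly.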
Example #6
def get_info(html):
    """Функция для извлечения информации из html-кода.
       Вход  : html-код
       Выход : список, состоящий из информации о заголовке, обложке, блок "Описание"
               блок основной информации, блок "Содержание", блок "Примечание".  """

    # Main information block (edition notice)
    book_info = get_edition_notice(html)
    # "Contents" block
    content = get_content(html)
    # Book cover
    cover = get_cover_image(html)
    # Title
    title = get_title(html)
    # "Description" and "Notes" blocks
    opis, prim = get_book_info(html)

    return title, cover, book_info, opis, content, prim
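
None of the get_* helpers are shown. A hedged sketch of what get_title could look like if html is the raw page markup (in the original project it could just as well be an already-parsed object); the parser and the tag choice are assumptions for illustration.

from bs4 import BeautifulSoup

def get_title(html):
    # Assumption: the catalogue page keeps the book title in <h1>, with <title> as a fallback.
    soup = BeautifulSoup(html, "html.parser")
    node = soup.find("h1") or soup.find("title")
    return node.get_text(strip=True) if node else ""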
Example #7

URL = 'https://www.amazon.in/'
driver = webdriver.Chrome("chromedriver")
driver.implicitly_wait(10)
driver.get(URL)
search_button = driver.find_element_by_id("twotabsearchtextbox")
search_term = str(input("What do you want?\n: "))
search_button.send_keys(search_term)
search_button.send_keys(Keys.ENTER)

#webpage = requests.get(URL, headers=HEADER)
soup = bs4.BeautifulSoup(driver.page_source, 'html5lib')

links = soup.find_all('a', attrs={'class':'a-link-normal s-no-outline'})
links_list = []
for link in links:
    links_list.append(link.get('href'))

for link in links_list:
    driver.get(URL + link)
    new_soup = bs4.BeautifulSoup(driver.page_source, 'html5lib')

    #function calls
    print("Product Title=", get_title(new_soup))
    print("Price=", get_price(new_soup))
    print("Rating=", get_rating(new_soup))
    print("User review=", get_review(new_soup))
    print("Availability=", get_availability(new_soup))
    print()
    print()
Example #8

    # ------------------
    # ------------------------------------------------------------------------
    # TEST 1 of 8: Compare Expected Title with HTML Title, OG Title, and
    # Twitter Title
    # ------------------------------------------------------------------------
    print("   TEST 1 of 8: Title Check")

    # Build the expected title
    if station_slogan is None or station_slogan == "":
        expected_title = 'sanitized' % station_name
    else:
        expected_title = 'sanitized' % (station_name, station_slogan)
    expected_title = expected_title.upper()

    # Get the actual values from the web page
    html_title = functions.get_title(page_data)
    og_title = functions.get_meta_tag_content(page_data, "og:title", "")
    twitter_title = functions.get_meta_tag_content(page_data, "",
                                                   "twitter:title")

    # Comparison time - all titles should match
    print("      Looking for     : '%s'" % expected_title)
    print("      Title Tag Found : '%s'" % html_title)
    print("      OG Found        : '%s'" % og_title)
    print("      Twitter Found   : '%s'" % twitter_title)

    # Print Pass/Fail - Increment counters if actual/expected values don't match
    if expected_title == og_title and \
            expected_title == twitter_title and \
            expected_title == html_title:
        message = "   TEST: %s\n" % PASSED
Example #9
def page_title():
    return jsonify(title=get_title(request.args.get("endpoint")))
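
A hedged sketch of how this handler might be wired up and queried; the route path, the app object and the stand-in get_title lookup are all assumptions for illustration.

from flask import Flask, jsonify, request

app = Flask(__name__)

# Hypothetical stand-in for the project's real get_title().
_TITLES = {"/": "Home", "/about": "About"}

def get_title(endpoint):
    return _TITLES.get(endpoint, "Untitled")

@app.route("/api/page-title")
def page_title():
    return jsonify(title=get_title(request.args.get("endpoint")))

# GET /api/page-title?endpoint=/about  ->  {"title": "About"}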
Example #10
    # of truth" for these tests.
    # If station_slogan is None...or empty....the expected title changes
    if station_slogan is None or station_slogan == "":
        expected_title = 'sanitized' % station_name
    else:
        expected_title = 'sanitized' % (station_name, station_slogan)
    expected_title = expected_title.upper()

    # ------------------------------------------------------------------
    # TEST: Compare Station API title with HTML <title>, og:title and
    # twitter:title. All 4 should match.
    # ------------------------------------------------------------------
    print("   TEST: Title Comparison")

    # Function returns string in upper case
    html_title = functions.get_title(page_source)
    og_title = functions.get_meta_tag_content(page_source, "og:title")
    twitter_title = functions.get_meta_tag_content(page_source, "", "twitter:title")

    # Comparison time - all titles should match
    print("      Looking for    : '%s'" % expected_title)
    print("      Title Tag Found: '%s'" % html_title)
    print("      OG Found       : '%s'" % og_title)
    print("      Twitter Found  : '%s'" % twitter_title)

    if expected_title == og_title and expected_title == twitter_title and \
            expected_title == html_title:
        message = "   TEST: %s\n" % PASSED
    else:
        message = "   TEST: %s\n" % FAILED
        stations_failed += 1
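
Both station-page tests above rely on functions.get_title and functions.get_meta_tag_content, neither of which is shown. Judging from the calls and the comment that the values come back upper-cased, a hedged sketch might look like the following; the signature (property vs. name attribute) and the upper-casing are inferred, not taken from the project.

import re

def get_title(page_source):
    # Assumed: pull the <title> text and upper-case it for comparison.
    m = re.search(r'<title>(.*?)</title>', page_source, re.S | re.I)
    return m.group(1).strip().upper() if m else ""

def get_meta_tag_content(page_source, prop="", name=""):
    # Assumed: match either <meta property="..."> or <meta name="..."> and
    # return its content attribute, upper-cased like the title.
    target = re.escape(prop or name)
    pattern = (r'<meta[^>]+(?:property|name)=["\']' + target +
               r'["\'][^>]+content=["\']([^"\']*)["\']')
    m = re.search(pattern, page_source, re.I)
    return m.group(1).strip().upper() if m else ""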