Example #1
import json

def isvalid(url):
    # The endpoint returns a JSON body; the response is valid if it carries a "data" key.
    payload = json.loads(str(get_html_soup(url)))
    return isinstance(payload, dict) and "data" in payload
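
All five examples depend on a get_html_soup helper that is defined elsewhere. A minimal sketch of what it plausibly does, assuming requests and BeautifulSoup (the timeout and parser choice are my assumptions, not the original code). Note that Examples #1 and #5 pass str(...) of the soup to json.loads, so the fetched body there is JSON rather than HTML:

import requests
from bs4 import BeautifulSoup

def get_html_soup(url):
    # Hypothetical reconstruction: fetch the page and parse it into a soup.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return BeautifulSoup(response.text, "html.parser")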
Example #2
def get_first_references_bitrumb():
    # Collect links to the five newest Bitrumb articles that are not stored yet.
    references = []
    soup = get_html_soup(BASE_URL_BITRUMB)
    list_items = soup.find_all("h3", {"class": "entry-title"})
    for item in list_items[:5]:
        url = get_article_url_bitrumb(item)
        if not is_exist_byurl(url):
            references.append(url)
    return references
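
get_article_url_bitrumb is also defined outside this listing. Since each item here is an h3 tag with class entry-title, a plausible sketch is that it reads the href of a nested anchor (an assumption, not the confirmed helper):

def get_article_url_bitrumb(item):
    # Hypothetical sketch: the entry title is assumed to wrap an <a> tag.
    return item.find("a")["href"]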
Example #3
def get_first_references_okex():
    # Collect links to the five newest OKEx announcements that are not stored yet.
    references = []
    soup = get_html_soup(
        "https://support.okex.com/hc/en-us/sections/115000447632-New-Token")
    list_items = soup.find_all("a", {"class": "article-list-link"})
    for item in list_items[:5]:
        # OKEx links are relative, so prefix them with the site's base URL.
        url = BASE_URL_OKEX + get_article_url_okex(item)
        if not is_exist_byurl(url):
            references.append(url)
    return references
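
In this example each item is already an <a> tag with class article-list-link, and the result is prefixed with BASE_URL_OKEX, so the helper presumably returns a relative path. A plausible sketch (an assumption; the real helper may differ):

def get_article_url_okex(item):
    # Hypothetical sketch: return the relative href stored on the anchor tag.
    return item.get("href")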
Example #4
def get_first_references_binance2():
    # Collect links to the five newest Binance announcements that are not stored yet.
    references = []
    soup = get_html_soup(
        "https://support.binance.com/hc/en-us/sections/115000202591-Latest-News"
    )  # I would move this link out into a named constant.
    list_items = soup.find_all("a", {"class": "article-list-link"})
    for item in list_items[:5]:
        url = get_article_url_binance(item)
        if not is_exist_byurl(url):
            references.append(url)
    return references
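
Examples #2-#4 all skip articles that is_exist_byurl reports as already known; that helper is not shown either. A minimal in-memory sketch, assuming the real version consults persistent storage of already-processed articles (the seen_urls set below is a stand-in):

seen_urls = set()  # Stand-in for the real store of already-processed article URLs.

def is_exist_byurl(url):
    # Hypothetical sketch: report whether this article URL was seen before.
    return url in seen_urls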
Example #5
import json

def get_upbit_text_article(url):
    # The argument is a URL (it is fetched and stored under 'url' below),
    # so it is renamed from the misleading original name "html".
    # The Upbit endpoint returns JSON; pull the title and body out of "data".
    payload = json.loads(str(get_html_soup(url)))
    news_header = payload['data']['title']
    paragraphs = payload['data']['body']
    # Build the article record directly instead of via a one-element list.
    return {
        'header': "*" + news_header + "*\n",
        'filling': str(paragraphs) + "\n",
        'url': url,
        # The last three characters of the URL are treated as the notice id.
        'urlup': ('https://upbit.com/service_center/notice?id='
                  + get_filling_article(url[-3:])),
    }
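
A usage sketch for Example #5, assuming the argument is an Upbit API URL whose JSON response carries data.title and data.body and whose last three characters are the notice id (the concrete URL below is illustrative, not taken from the original code):

article = get_upbit_text_article("https://api-manager.upbit.com/api/v1/notices/123")
print(article['header'])
print(article['urlup'])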