Example #1
def base_soup():
    # `url`, `requests`, and `scrapper` (the BeautifulSoup alias imported in
    # Example #9) are assumed to be defined at module level.
    response = requests.get(url)
    soup = scrapper(response.content, 'html.parser')
    # Strip <script> and <style> tags so only visible content remains
    for script in soup(['script', 'style']):
        script.extract()
    chats = soup.find_all('div', {'class': 'message-content'})
    return chats
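A minimal usage sketch, assuming the module-level url is already set and the page really contains message-content divs; the print loop is illustrative only:

for chat in base_soup():
    # each chat is a bs4 Tag; get_text() flattens it to plain text
    print(chat.get_text(strip=True))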
Example #2
def get_user_id(base_url):
    # Obtain the victim's user id from a hidden form field

    headers = {
        'Host': base_url,
        'User-Agent': 'Guess what?',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Content-Length': '30',
        'Connection': 'close',
        'Upgrade-Insecure-Requests': '1',
    }
    response = bypass_human_verfication(base_url)

    if response is not False:
        soup = scrapper(response.content, 'html.parser')
        try:
            # The id sits in <input name="user_id_victim" value="...">
            return soup.find('input', {'name': 'user_id_victim'}).get('value')
        except AttributeError:
            # find() returned None: the field is not on the page
            return False
    else:
        return False
Example #3
def auto_troubleshoot(response_):
    # Return the index of the <script> tag containing the 's.handle(' call
    soup = scrapper(response_, 'html.parser')
    scripts = soup.find_all('script')
    for index, script in enumerate(scripts):
        if 's.handle(' in str(script):
            return index
    return None  # no matching script found
Example #4
def main(item_type, supply):
    if item_type == "0":
        # Data for search_by_email: split into local part and domain
        raw_data = str(supply).split('@')
        data = ('luser=' + raw_data[0] + '&domain=' + raw_data[1]
                + '&luseropr=0&domainopr=0&submitform=em')
    elif item_type == "1":
        # Data for search_password
        data = 'password=' + str(supply) + '&submitform=pw'
    else:
        print(bcolors.FAIL + 'Input not defined' + bcolors.ENDC)
        exit(0)

    response = requests.post('http://pwndb2am4tzkvold.onion.pet/', headers=headers, data=data)

    soup = scrapper(response.content, 'html.parser')
    try:
        # Results are rendered in the fourth <section> of the page
        return soup.find_all('section')[3].text
    except IndexError:
        # Unexpected page layout: fall back to the whole page text
        return soup.text
Example #5
def friendsProtocol(text):

    if 'friends' in text:
        url = 'https://www.youtube.com/results?search_query=friends'

        friends_list = []

        webpage = requests.get(url).text
        webpage = scrapper(webpage, 'html.parser')

        # Collect every link that points to a video watch page
        for link in webpage.find_all('a', href=True):
            if '/watch' in link['href']:
                friends_list.append(link['href'])

        # Deduplicate, then open one of the first few results
        friends_list = list(set(friends_list))

        if friends_list and openWebPage(random.choice(friends_list[:6])):
            return True

    return False
Example #6
def parse(webpage):
    # Parse the text of the webpage as HTML
    webpage = scrapper(webpage.text, 'html.parser')

    # Get the date of the last published article: take the second line of
    # the publishdate element and drop the weekday prefix before the comma
    last_publish_date = str(webpage.select("[class=publishdate]")[0])
    last_publish_date = last_publish_date.split("\n", 2)[1].split(', ', 1)[1]
    last_publish_date = datetime.datetime.strptime(last_publish_date, "%B %d, %Y")

    article = webpage.select("[class=title]")[0]

    # Get the article title and link: the title is the last quoted attribute
    # value in the tag's HTML, the link the first quoted value of its
    # [href] descendant
    article_title = str(article).split('"')[-2]
    article_link = str(article.select("[href]")).split('"', 2)[1]

    if last_publish_date.date() == datetime.date.today():
        return f'''
        Google AI has published a new article!

        Title: {article_title}
        Link: {article_link}
        '''

    return None
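A hedged usage sketch for parse(): it expects a requests.Response, since it reads webpage.text. The Google AI blog URL below is an assumption inferred from the message text, not something the snippet specifies:

response = requests.get('https://ai.googleblog.com/')  # assumed source URL
message = parse(response)
if message:
    print(message)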
Example #7
def get_json(response_, index_id):
    soup = scrapper(response_, 'html.parser')
    scripts = soup.find_all('script')
    # Extract the JSON argument of the s.handle(...) call in that script
    json_data = str(scripts[index_id]).split('s.handle(')[1].split(');')[0]
    return json.loads(json_data)
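Examples #3 and #7 appear to work as a pair: auto_troubleshoot() finds the index of the <script> tag that calls s.handle(), and get_json() decodes the JSON passed to that call. A minimal sketch of the two combined, where target_url is a hypothetical placeholder:

html = requests.get(target_url).content  # target_url is hypothetical
index = auto_troubleshoot(html)
if index is not None:  # a matching script was found
    data = get_json(html, index)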
Example #8
def soup(url):
    response = requests.get(url)
    bsoup = scrapper(response.content, 'html.parser')
    return bsoup
Example #9
import requests
from bs4 import BeautifulSoup as scrapper
from SwiftModel import *
import SwiftModelGen
import os

swift_models = []

page = requests.get(
    'https://media.fitanalytics.com/resources/api/reference-20180420.html#type-style'
)
soup = scrapper(page.content, 'html.parser')
raw_models = soup.find_all('div', class_='set-description')

for raw_model in raw_models:
    # Only allow actual models
    raw_model_name = raw_model.find('h2')
    if 'id="type-' not in str(raw_model_name):
        continue

    # Only allow models with tables (properties)
    raw_model_tables = raw_model.find_all('table')
    if not raw_model_tables:
        continue

    # Extract model name and description
    model_name = raw_model.find('h2', recursive=False).get_text()
    try:
        model_descriptions = [
            x.get_text() for x in raw_model.find_all('p', recursive=False)
        ]