Code example #1
import random
import threading

import requests


def requestJob(item):
    word = words[item]

    # Each site enforces its own username-length rules, keyed by a numeric
    # site id: 3 = Twitter (5-15 chars), 10 = GitHub (under 40 chars),
    # 13 = Pastebin (3-20 chars).
    if SITE() == 3 and not 4 < len(word) < 16:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on twitter because it has an illegal length.")
    elif SITE() == 10 and not len(word) < 40:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on github because it has an illegal length.")
    elif SITE() == 13 and not 2 < len(word) < 21:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on pastebin because it has an illegal length.")
    else:
        link = replace(word)
        s = requests.Session()
        if PROXY() == "True":  # the config helper returns the string "True", not a bool
            plist = PROXYLIST()
            sess = ProxyHelper().setProxy(s, random.choice(plist))  # random proxy per request
            r = sess.get(link)
        else:
            r = s.get(link)
        with print_lock:
            log_result(r, word, link)
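This excerpt (and every variant below) leans on module-level names that the listings never define: `words`, `print_lock`, `replace`, and `log_result`. A minimal sketch of that scaffolding, with purely hypothetical bodies inferred from the call sites:

import threading

# Hypothetical scaffolding; the real projects define their own versions.
words = ["example", "another_name"]   # candidate usernames to check
print_lock = threading.Lock()         # serializes output from worker threads

def replace(word):
    # Builds the profile URL for the configured site. The template here
    # is a placeholder, not any project's actual URL scheme.
    return f"https://example.com/{word}"

def log_result(r, word, link, matches=None):
    # Crude illustrative heuristic: on plain profile URLs a 404 usually
    # means the name is free. `matches` is used by the parse jobs below.
    status = "AVAILABLE" if r.status_code == 404 else "TAKEN"
    print(f"[{threading.current_thread().name}] {word} is {status} -> {link}")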
Code example #2
File: get.py Project: iblisbuu/cli
def requestJob(item):
    word = words[item]

    # Same per-site length gate as example #1, but configuration lives on
    # a checker object `ch` and proxy handling on a helper object `ph`.
    if ch.getSite() == 3 and not 4 < len(word) < 16:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on twitter because it has an illegal length.")
    elif ch.getSite() == 10 and not len(word) < 40:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on github because it has an illegal length.")
    elif ch.getSite() == 13 and not 2 < len(word) < 21:
        with print_lock:
            print(f"[{threading.current_thread().name}] {word} is UNAVAILABLE on pastebin because it has an illegal length.")
    else:
        link = replace(word)
        s = requests.Session()
        # Spoof a desktop browser so the sites don't serve a bot response.
        ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.3"
        headers = {"user-agent": ua}
        if ch.enableProxy():
            plist = ch.getProxies()
            sess = ph.setProxy(s, random.choice(plist))  # random proxy per request
            r = sess.get(link, headers=headers)
        else:
            r = s.get(link, headers=headers)
        with print_lock:
            log_result(r, word, link)
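None of the excerpts show how `requestJob` is actually driven, but the thread name in every log line and the shared `print_lock` imply a pool of worker threads fanned out over the indices of `words`. A plausible driver, assuming the scaffolding sketched earlier:

from concurrent.futures import ThreadPoolExecutor

# Hypothetical driver; the pool size is an arbitrary choice, not taken
# from the projects.
with ThreadPoolExecutor(max_workers=8) as pool:
    pool.map(requestJob, range(len(words)))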
Code example #3
File: get.py Project: rasarab/cli-checker
def requestJob(item):
    word = words[item]
    link = replace(word)
    s = requests.Session()
    if PROXY() == "True":  # config helper returns the string "True", not a bool
        plist = PROXYLIST()
        sess = ProxyHelper().setProxy(s, random.choice(plist))
        r = sess.get(link)
    else:
        r = s.get(link)
    with print_lock:
        log_result(r, word, link)
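`ProxyHelper().setProxy(s, ...)` here, and the `ph.setProxy` in example #2, is never shown. Given how the returned session is used, it presumably just attaches the chosen proxy to the `requests` session; a guess, assuming proxies are stored as plain `host:port` strings:

class ProxyHelper:
    # Hypothetical reconstruction; the real helper may differ.
    def setProxy(self, session, proxy):
        # Route both schemes through the same HTTP proxy.
        session.proxies = {
            "http": f"http://{proxy}",
            "https": f"http://{proxy}",
        }
        return session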
Code example #4
def requestJob(item):
    word = words[item]
    link = replace(word)
    s = requests.Session()
    if PROXY():
        pl = get_proxy_list()
        select_random_proxy(pl)  # fills the shared proxyDict
        if check_proxy():
            r = s.get(link, proxies=proxyDict)
        else:
            # The chosen proxy is dead: draw again and retry once.
            pl = get_proxy_list()
            select_random_proxy(pl)
            r = s.get(link, proxies=proxyDict)
    else:
        r = s.get(link)
    with print_lock:
        log_result(r, word, link)
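Example #4 switches to a global-state proxy API: `select_random_proxy` apparently fills a shared `proxyDict`, and `check_proxy` probes whether the chosen proxy answers. One way those helpers could fit together, purely as an assumption:

import random
import requests

proxyDict = {}  # shared mapping handed to requests via proxies=

def get_proxy_list():
    # Placeholder list; the projects presumably load theirs from a file.
    return ["203.0.113.1:8080", "203.0.113.2:3128"]

def select_random_proxy(pl):
    # Point both schemes at one randomly chosen proxy.
    proxy = random.choice(pl)
    proxyDict["http"] = f"http://{proxy}"
    proxyDict["https"] = f"http://{proxy}"

def check_proxy():
    # Cheap liveness probe against a small, reliable endpoint.
    try:
        requests.get("https://httpbin.org/ip", proxies=proxyDict, timeout=5)
        return True
    except requests.RequestException:
        return False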
Code example #5
def parseJob(item):
    word = words[item]
    link = replace(word)
    s = requests.Session()
    if PROXY():
        pl = get_proxy_list()
        select_random_proxy(pl)  # fills the shared proxyDict
        if check_proxy():
            r = s.get(link, proxies=proxyDict)
        else:
            # The chosen proxy is dead: draw again and retry once.
            pl = get_proxy_list()
            select_random_proxy(pl)
            r = s.get(link, proxies=proxyDict)
    else:
        r = s.get(link)
    page = r.content
    soup = BeautifulSoup(page, "html.parser")
    matches = []
    if SITE() == 5:  # site id 5: user profile pages
        # Available: the "not found" banner is present.
        match1 = soup.body.find_all(
            string='The specified profile could not be found.')
        # Taken: a private-profile notice or the profile header exists.
        match2 = soup.body.find_all(string='This profile is private.')
        match3 = soup.find('div', attrs={'class': 'profile_header'})

        matches = [match1, match2, match3]
    elif SITE() == 6:  # site id 6: group pages
        # Available: the "no group" banner is present.
        match1 = soup.body.find_all(
            string='No group could be retrieved for the given URL.')
        # Taken: a join button or the group header exists.
        match2 = soup.body.find_all(string='Request To Join')
        match3 = soup.find('div', attrs={'class': 'grouppage_header'})

        matches = [match1, match2, match3]
    else:
        with print_lock:
            print("Wrong site!")

    with print_lock:
        log_result(r, word, link, matches=matches)
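The parse jobs hand `log_result` a `matches` list instead of relying on the status code. Extending the earlier sketch, a plausible way to weigh the three matches (the "not found" banner means available; the private-page text or the header div means taken):

def log_result(r, word, link, matches=None):
    # Hypothetical interpretation of [match1, match2, match3] from above:
    # match1 hits the "available" banner, match2/match3 hit taken pages.
    if matches:
        available = bool(matches[0]) and not (matches[1] or matches[2])
    else:
        available = r.status_code == 404
    status = "AVAILABLE" if available else "TAKEN"
    print(f"[{threading.current_thread().name}] {word} is {status} -> {link}")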
Code example #6
def parseJob(item):
    word = words[item]
    link = replace(word)
    s = requests.Session()
    if ch.enableProxy():
        plist = ch.getProxies()
        sess = ph.setProxy(s, random.choice(plist))  # random proxy per request
        r = sess.get(link)
    else:
        r = s.get(link)
    page = r.content
    soup = BeautifulSoup(page, "html.parser")
    matches = []
    if ch.getSite() == 5:  # site id 5: user profile pages
        # Available: the "not found" banner is present.
        match1 = soup.body.find_all(
            string='The specified profile could not be found.')
        # Taken: a private-profile notice or the profile header exists.
        match2 = soup.body.find_all(string='This profile is private.')
        match3 = soup.find('div', attrs={'class': 'profile_header'})

        matches = [match1, match2, match3]
    elif ch.getSite() == 6:  # site id 6: group pages
        # Available: the "no group" banner is present.
        match1 = soup.body.find_all(
            string='No group could be retrieved for the given URL.')
        # Taken: a join button or the group header exists.
        match2 = soup.body.find_all(string='Request To Join')
        match3 = soup.find('div', attrs={'class': 'grouppage_header'})

        matches = [match1, match2, match3]
    else:
        with print_lock:
            print("Wrong site!")

    with print_lock:
        log_result(r, word, link, matches=matches)