Example #1
def get_proxy_list(valid_proxies):
    from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
    import requests
    import concurrent.futures
    req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
    proxies_list = req_proxy.get_proxy_list()  # this creates the proxy list
    proxies = []
    for prox in proxies_list:
        proxies.append(prox.get_address())

    print(proxies)
    
    def extract(proxy):
        try: 
            r = requests.get('https://httpbin.org/ip', proxies={'http': 'http://' + proxy, 'https': 'http://' + proxy}, timeout=1)
            print(r.json(),'-working')
            valid_proxies.append(proxy)
        except Exception:
            pass
        return proxy
    extract("203.115.112.218:3128")
    print("se llego")

    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(extract, proxies)

    return valid_proxies
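A minimal, hedged usage sketch for the helper above (the shared list and the random pick are illustrative; it assumes get_proxy_list is defined in the same module as shown):

import random

if __name__ == "__main__":
    working = []
    get_proxy_list(working)  # fills 'working' in place and also returns it
    if working:
        print("using proxy:", random.choice(working))
    else:
        print("no working proxies found")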
Example #2
def botStartChrome():
    print("give me a bottle of rum!")
    req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
    proxies = req_proxy.get_proxy_list()
    number = random.randint(0, len(proxies) - 1)  # avoid indexing past the end of the list
    print("rand int ==== " + str(number))
    PROXY = proxies[number].get_address()
    print(proxies[number].get_address())
    print(proxies[number].country)
    webdriver.DesiredCapabilities.CHROME['proxy'] = {
        "httpProxy": PROXY,
        "ftpProxy": PROXY,
        "sslProxy": PROXY,
        "proxyType": "MANUAL",
    }
    driver = webdriver.Chrome(executable_path='chromedriver')
    try:
        driver.get('https://temanpost.com/games/daftar-game-berbayar-yang-bisa-diklaim-gratis-pada-agustus-2020/')
        time.sleep(30)
    except Exception:
        print("error: something went wrong")
    
    driver.quit()
 def __init__(self):
     self.req_proxy = RequestProxy()
     self.proxies = self.req_proxy.get_proxy_list()  # this creates the proxy list
     self.total = len(self.proxies)
     self.getExtraProxies()
     pass
Example #4
def make_request():
    req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
    proxies = req_proxy.get_proxy_list()  # this creates the proxy list
    ls = []
    for prox in proxies:
        ls.append(prox.get_address())
    save_prox(ls)
def fetch_via_proxy(url):
    import time
    from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
    req_proxy = RequestProxy()
    request = req_proxy.generate_proxied_request(url)
    if request is not None:
        time.sleep(5)
        return request
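A short, hedged sketch of how fetch_via_proxy above might be wrapped in a retry loop (the URL and attempt count are illustrative assumptions):

def fetch_with_retries(url, attempts=3):
    # Return the first successful proxied response, or None if all attempts fail.
    for _ in range(attempts):
        response = fetch_via_proxy(url)
        if response is not None and response.status_code == 200:
            return response
    return None

result = fetch_with_retries('https://httpbin.org/ip')
if result is not None:
    print(result.text)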
Example #6
    def __init__(self, conf, file=None):

        self.config = conf

        self.req_proxy = RequestProxy(protocol=Protocol.HTTP)
        if file is not None:
            self.proxies_list = self.config["proxies"]
        else:
            self.proxies_list = self.req_proxy.get_proxy_list()
Example #7
    def __init__(self, url='https://youtube.com', proxy=None, verbose=False):
        """ init variables """

        self.url = url
        self.proxy = proxy
        self.verbose = verbose
        # All chrome options
        # https://peter.sh/experiments/chromium-command-line-switches/
        self.options = webdriver.ChromeOptions()
        # Run in headless mode, without a UI or display server dependencies
        self.options.add_argument('--headless')
        # Disables GPU hardware acceleration. If software renderer is not in
        # place, then the GPU process won't launch
        self.options.add_argument('--disable-gpu')
        # Disable audio
        self.options.add_argument('--mute-audio')
        # Runs the renderer and plugins in the same process as the browser
        self.options.add_argument('--single-process')
        # Autoplay policy
        self.options.add_argument('--autoplay-policy=no-user-gesture-required')
        if self.proxy:
            # Uses a specified proxy server, overrides system settings. This
            # switch only affects HTTP and HTTPS requests
            self.options.add_argument('--proxy-server={0}'.format(self.proxy))
        # A string used to override the default user agent with a custom one

        req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
        proxies = req_proxy.get_proxy_list()  # this creates the proxy list
        PROXY = proxies[0].get_address()
        webdriver.DesiredCapabilities.CHROME['proxy'] = {
            "httpProxy": PROXY,
            "ftpProxy": PROXY,
            "sslProxy": PROXY,
            "proxyType": "MANUAL",
        }
        self.user_agent = utils.user_agent()
        self.options.add_argument('--user-agent={0}'.format(self.user_agent))
        self.browser = webdriver.Chrome(ChromeDriverManager().install(),
                                        options=self.options)
        self.default_timeout = 20
        # Specifies the amount of time the driver should wait when trying to
        # find any element (or elements) if it is not immediately available.
        # The default setting is 0. Once set, the implicit wait is set for the
        # life of the WebDriver object.
        self.browser.implicitly_wait(self.default_timeout)
        # Set the amount of time to wait for a page load to complete before
        # throwing an error.
        # self.browser.set_page_load_timeout(self.default_timeout)
        # Set the amount of time that the script should wait during an
        # execute_async_script call before throwing an error.
        # self.browser.set_script_timeout(self.default_timeout)
        # Sets the width and height of the current window
        self.browser.set_window_size(1920, 1080)
        # Opens the page
        self.open_url()
Example #8
def req_split(r):
    start = time.time()
    req_proxy = RequestProxy()
    #print("Initialization took: {0} sec".format((time.time() - start)))
    #print("Size: {0}".format(len(req_proxy.get_proxy_list())))
    #print("ALL = {0} ".format(list(map(lambda x: x.get_address(), req_proxy.get_proxy_list()))))
    request = req_proxy.generate_proxied_request(url)  # 'url' is assumed to be defined at module scope
    if request is not None:
        print("\t Response: ip={0}".format(u''.join(request.text).encode('utf-8')))
        print("-> Going to sleep..")
Example #9
def proxies():
    if os.path.isfile('proxies.txt'):
        pass
    else:
        req_proxy = RequestProxy()
        PROXIES = "{0}".format(
            list(map(lambda x: x.get_address(), req_proxy.get_proxy_list())))
        # Write the formatted list to disk
        with open('proxies.txt', 'w') as f:
            f.writelines(PROXIES)
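A hedged companion sketch for reading proxies.txt back: the file above stores the repr() of a Python list, so ast.literal_eval can recover it (the loader function name is illustrative):

import ast

def load_proxies(path='proxies.txt'):
    # The file holds a stringified Python list, e.g. "['1.2.3.4:80', ...]".
    with open(path) as f:
        return ast.literal_eval(f.read())

saved = load_proxies()
print("loaded", len(saved), "proxy addresses")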
Example #10
def getInfo(url, artist, album):
    # Get the album page
    # An alternate way of doing things- use the rym search engine
    # url = https://rateyourmusic.com/search?searchterm=ARTIST+ALBUM&type=l
    #search the page for the artist and album, then go to that page
    url = url + artist + "/" + album + "/"
    try:
        req_proxy = RequestProxy()
        page = req_proxy.generate_proxied_request(
            url
        )  #PROBLEM, gets detected after a few different requests, unless you manually make it wait for multiple minutes between requests
        soup = BeautifulSoup(page.content, 'html.parser')
    except UnicodeDecodeError:
        print('UnicodeDecodeError! Skipping...')
        return

    # Get genres from page
    genre_text = str(soup.findAll("span", {"class": "release_pri_genres"}))
    # Get secondary genres from page
    sec_genre_text = str(soup.findAll("span", {"class": "release_sec_genres"}))
    # Clean up and compile all genres
    unclean_genres = re.findall(r']">.*?</a>', genre_text)
    unclean_sec_genres = re.findall(r']">.*?</a>', sec_genre_text)
    genres = []
    for genre in unclean_genres:
        genre = genre[3:-4]
        genres.append(genre)
    for genre in unclean_sec_genres:
        genre = genre[3:-4]
        genres.append(genre)

    # Get descriptors from page
    descriptor_text = str(
        soup.findAll("span", {"class": "release_pri_descriptors"}))
    descriptor_text = descriptor_text[37:-7]
    # Clean up and organize each descriptor
    unclean_descriptors = re.findall(r'.*?,', descriptor_text)
    descriptors = []
    for descriptor in unclean_descriptors:
        descriptor = descriptor[2:-1]
        descriptors.append(descriptor)

    # Print genres
    genres = ';'.join(genre for genre in genres)
    print(artist + '->' + album + ' genres:')
    print(genres)

    # Print descriptors
    descriptors = '; '.join(descriptor.title() for descriptor in descriptors)
    print(artist + '->' + album + ' descriptors:')
    print(descriptors)

    return genres, descriptors
Example #11
    def __init__(self,
                 db,
                 path_to_webdriver,
                 config=None,
                 logger=None,
                 cookies=None):
        self.logger = logger
        self.logger.info("webdriver path: ".format(path_to_webdriver))

        self.config = config

        chrome_options = ChromeOption()

        prefs = {"profile.default_content_setting_values.notifications": 2}
        chrome_options.add_experimental_option("prefs", prefs)

        # ignore error proxy
        chrome_options.add_argument('--ignore-certificate-errors')
        chrome_options.add_argument('--ignore-ssl-errors')

        # automatically dismiss prompt
        chrome_options.set_capability('unhandledPromptBehavior', 'dismiss')

        self.driver = webdriver.Chrome(path_to_webdriver,
                                       chrome_options=chrome_options)

        # get PROXY
        req_proxy = RequestProxy()
        proxies = req_proxy.get_proxy_list()

        # set PROXY
        PROXY = proxies[0].get_address()
        webdriver.DesiredCapabilities.CHROME['proxy'] = {
            "httpProxy": PROXY,
            "ftpProxy": PROXY,
            "sslProxy": PROXY,
            "proxyType": "MANUAL",
        }

        if cookies is None:
            self.cookies = self.driver.get_cookies()
        else:
            for cookie in cookies:
                self.driver.add_cookie(cookie)
            self.cookies = cookies
        self.db = db
Example #12
def main():
    api = InstagramAPI("_______jack______", "ghdlWk37qkqk*")
    api.login()  # login

    req_proxy = RequestProxy()
    proxy_index = 0
    num_proxies = len(req_proxy.proxy_list)
    api.s.proxies = {
        "http": "http://" + req_proxy.proxy_list[proxy_index].get_address()
    }

    target_name = 'tonyhong1004'
    api.searchUsername(target_name)
    target_pk = api.LastJson['user']['pk']
    prev_max_id = 0
    counter = 0
    max_id = GetFollowersAndRecord(api, target_pk,
                                   target_name + '_followers.txt')

    while True:
        if max_id == 'End':
            print "Happy finished."
            break
        elif max_id == prev_max_id:
            pdb.set_trace()
        else:
            proxy_index = (proxy_index + 1) % num_proxies
            counter += 1
        print(counter)

        prev_max_id = max_id

        api.s.proxies.update({
            "http":
            "http://" + req_proxy.proxy_list[proxy_index].get_address()
        })
        api.s.headers.update(
            dict(req_proxy.generate_random_request_headers().items()))
        # since the headers change, we apparently need to log in again
        api.login(force=True, dynamic_header=True)  # login
        #print ("ip: ", api.s.proxies)
        #print ("header: ", api.s.headers)
        max_id = GetFollowersAndRecord(api,
                                       target_pk,
                                       target_name + '_followers.txt',
                                       maxid=max_id)
Example #13
class _Proxy:
    ip = re.compile(r"\d+\.\d+\.\d+\.\d+")

    @staticmethod
    def get_ip():
        return _Proxy.ip.search(
            requests.get('http://icanhazip.com/').text).group(0)

    def __init__(self, conf, file=None):

        self.config = conf

        self.req_proxy = RequestProxy(protocol=Protocol.HTTP)
        if file is not None:
            self.proxies_list = self.config["proxies"]
        else:
            self.proxies_list = self.req_proxy.get_proxy_list()

    def get_proxy(self):
        logger = logging.getLogger(f"pid={os.getpid()}")

        while True:
            p = self.proxies_list.pop(0).get_address()

            try:
                logger.info(f"Trying {p}")
                proxy = {"http": f"http://{p}", "https": f"https://{p}"}
                ip = _Proxy.ip.search(
                    requests.get("http://icanhazip.com/",
                                 proxies=proxy,
                                 timeout=2).text)
                if ip is None:
                    raise Exception()

                if ip.group(0) == self.get_ip():
                    raise Exception()

                if requests.get("http://google.com/", proxies=proxy,
                                timeout=5).status_code != 200:
                    raise Exception()

                return p

            except IndexError:
                logger.info("Loading more proxies")
                self.proxies_list = self.req_proxy.get_proxy_list()
            except Exception:
                # the proxy failed validation or the request errored; try the next one
                continue
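A hedged usage sketch for the _Proxy helper above (the config dict is an illustrative assumption; it relies on the imports already used by the class: requests, re, os, logging, RequestProxy, Protocol):

import logging

logging.basicConfig(level=logging.INFO)

conf = {"proxies": []}      # illustrative; only consulted when a proxy file is passed
pool = _Proxy(conf)         # with file=None this pulls RequestProxy's online list
address = pool.get_proxy()  # loops until a proxy passes the icanhazip/google checks
print("validated proxy:", address)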
Example #14
def working_proxies(countries_list=None):
    req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
    proxies = req_proxy.get_proxy_list()  # this creates the proxy list

    ##################################preparation of proxy list###############################
    proxies_address = list(map(lambda x: x.get_address(), proxies))
    proxies_countries = list(map(lambda x: str(x.country), proxies))

    df = pd.DataFrame({'countries': proxies_countries, 'IP': proxies_address})

    #obtain the asia countries want to keep
    #countries=Counter(proxies_countries)
    #for key, value in countries.items():
    #   print(key, value)

    if countries_list is None:
        countries_list = [  #'Indonesia'                                                             # total 23, 8/20
            #'Cambodia'                                                             # total 8, 3/8
            #'India'                                                                # total 23, 7/20
            'Hong Kong',  # total 10, 6/10        chosen
            'Thailand',  # total 26, 10/20       chosen
            #'Nepal'                                                                # total 11,  3/11
            #'Myanmar',                                                              # total 4, 2/4
            #'Bangladesh'                                                           # total 12, 2/12
            #'Philippines'                                                          # total 3, 1/3
            #'Singapore'                                                            # total 7, 2/7
            #'Mongolia'                                                             # total 4, 0/4
            'Vietnam',  # total 11, 6/11        chosen
            'Pakistan',  # total 14, 7/14        chosen
            #'Japan'                                                                # total 11, 1/11
            #'Korea'                                                                # total  1, 0/1
            #'China'                                                                # total 7, 0/7
            'Macau'  # total 3, 3/3           chosen
        ]

    df = df[df['countries'].isin(
        countries_list)]  # a DataFrame with columns: countries, IP addresses

    b = in_list(countries_list, df)

    print_results(b)

    return df
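A brief, hedged example of calling working_proxies above with an explicit filter (the country names are illustrative):

df_proxies = working_proxies(countries_list=['Hong Kong', 'Vietnam'])
print(df_proxies.head())
print("candidate proxies:", len(df_proxies))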
Example #15
 def __init__(self, mysql_instance, instragram_api, share):
     self.share = share #type: generator.Share
     self.instagram_api = instragram_api  # type: instagram_pi.InstagramAPI
     self.mysql_instance = mysql_instance # type: mysql.Connector
     self.proxy_request = RequestProxy()
     self.request = requests
     self.is_running_follower = False
     self.is_running_following = False
     self.is_running_followings_bd_update = False
     self.is_running_followers_bd_update = False
Example #16
class RequestMaker:
    def __init__(self):
        self.req_proxy = RequestProxy()

    def _generate_proxied_request(self, url, params=None):
        if params is None:
            params = {}
        for _ in range(0, len(self.req_proxy.get_proxy_list())):
            proxy_response = self.req_proxy.generate_proxied_request(
                url, params=params)
            if proxy_response is not None:
                return proxy_response
        return None

    def get(self, url, params=None):
        proxy_response = self._generate_proxied_request(url, params)
        if proxy_response is None:
            raise RuntimeError(
                'Failed to generate proxied request for {}'.format(url))

        return proxy_response
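A minimal, hedged usage sketch for the RequestMaker wrapper above (the URL and params are illustrative):

maker = RequestMaker()
try:
    response = maker.get('https://httpbin.org/ip', params={'format': 'json'})
    print(response.status_code, response.text)
except RuntimeError as exc:
    # Raised when no proxy in the list produced a response.
    print("all proxies failed:", exc)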
Example #17
 def __init__(self, instragram_api, mysql_instance):
     self.instagram_api = instragram_api  # type: instagram_pi.InstagramAPI
     self.max_follow_per_user = 20000
     self.interval = None
     self.is_running_strategy = False
     self.is_running_unfollow_strategy = False
     self.break_follow_seconds = 10
     self.followings = {}
     self.mysql_instance = mysql_instance  # type:  mysql.Connector
     self.request = requests
     self.proxy_request = RequestProxy()
     self.share = Shared()
     self.comparator = comparator.Compare(self.mysql_instance,
                                          self.instagram_api, self.share)
    def __init__(self,
                 stock_file,
                 data_storage_dir="./historical_stock_data",
                 threads=10,
                 clear_existing=True):
        """
        Initializes the proxy server as well as directories that all of
        the read in historical data will be stored to.

        Note: The directory structure could already exist and the data could already be there.
        It does not always make sense to delete the old data and start again.  If the clear_existing
        variable is set, clear the existing directories.  The default is to clear the existing
        directories containing historical data and start over.
        """

        self.proxy_server = RequestProxy()
        self.output_dir = data_storage_dir
        self.ticker_file = stock_file
        self.thread_limit = threads

        # If the user asks for it, clear the existing directory structure
        if clear_existing is True:
            self.clear_directories()

        # Check to see if the file containing ticker symbols exists
        if not os.path.exists(stock_file):
            raise BadTickerFile()

        # Try to make the directory structure that the data will be stored in
        self.setup_directories()

        try:
            os.makedirs("%s/dividends" % self.output_dir)
        except OSError:
            print "[Error]: Could not create directory structure."
            raise CannotCreateDirectory()
class Proxies:
    def __init__(self):
        self.req_proxy = RequestProxy()
        self.proxies = self.req_proxy.get_proxy_list()  # this creates the proxy list
        self.total = len(self.proxies)
        self.getExtraProxies()
        pass

    def getExtraProxies(self):
        addrs = []
        with open("proxy_list.csv", 'r') as f:
            lines = f.readlines()
        for line in lines:
            items = line.split()
            if len(items) < 2:
                continue
            if len(items[0].split('.')) == 4:
                addrs.append(items[0] + ":" + items[1])
        self.addrs = addrs

    def getProxy(self):
        # if(self.pointer+1 > self.total):
        #     self.renewProxies()
        #     self.pointer = 0
        rand_n = random.randint(0, self.total - 1)
        prox = self.proxies[rand_n]
        if not self.isValid(prox):
            rand_n = random.randint(0, len(self.addrs) - 1)
            return self.addrs[rand_n]
        # while self.isValid(prox) == False:
        #     rand_n = random.randint(0, self.total-1)
        #     prox = self.proxies[rand_n]
        #     print(prox.get_address())
        #     pass
        # self.pointer += 1

        return prox

    def isValid(self, prox):
        checker = ProxyChecker()
        return checker.check_proxy(prox.get_address())

    def getAddresses(self):
        addrs = [x.get_address() for x in self.proxies]
        return addrs
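A hedged usage sketch for the Proxies class above. Note that getProxy() returns a proxy object from the online list but a plain address string from the CSV fallback, so the caller normalizes the result (this behaviour is inferred from the code above; proxy_list.csv must exist):

pool = Proxies()
candidate = pool.getProxy()
# Normalize: online-list entries expose get_address(), CSV entries are already strings.
address = candidate if isinstance(candidate, str) else candidate.get_address()
print("picked proxy:", address)
print("online proxies available:", pool.total)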
Example #20
    def handle(self, *args, **options):
        print('Start Parsing')
        req_proxy = RequestProxy()
        file_ = open(os.path.join(PROJECT_ROOT, 'zipcodes.csv'))
        repeat_request_zip = []
        with file_ as csvfile:
            zip_reader = csv.reader(csvfile, delimiter=',')
            for row in zip_reader:
                zip_code = row[0]
                city = row[1]
                state = row[2]
                print('Parse Zip: %s' % zip_code)
                if not self.make_request(state, city, zip_code, req_proxy):
                    repeat_request_zip.append(row)

        for zip_code_info in repeat_request_zip:
            zip_code = zip_code_info[0]
            city = zip_code_info[1]
            state = zip_code_info[2]
            print('Parse Zip: %s' % zip_code)
            self.make_request(state, city, zip_code, req_proxy)
Example #21
def Run(request):
    if request.method == 'POST':
        if request.POST.get('param'):

            from selenium import webdriver
            from selenium.webdriver.common.keys import Keys
            from selenium.common.exceptions import ElementClickInterceptedException
            from selenium.webdriver.common.action_chains import ActionChains
            from selenium.webdriver.common.by import By
            from selenium.webdriver.support.ui import WebDriverWait
            from selenium.webdriver.support import expected_conditions as EC
            import time
            from selenium.webdriver.support.ui import Select
            import random
            from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
            from Home.models import Emails
            from selenium.webdriver.remote.webdriver import WebDriver

            #webdriver local location
            path = "C:\Program Files\chromedriver.exe"

            #list of tags and urls
            URLs = [
                [
                    "http://snappysurveys.net/", "email", "started",
                    "first_name", "last_name", "dob_month", "dob_day",
                    "dob_year", "addr_full", "addr_city",
                    "addr_state_province", "addr_zip", "addr_phone", "offers1",
                    "offers2", "offers3", "offers4", "offers5", "submitBtn"
                ],
                [
                    "https://youreducationfuture.com/", "firstname",
                    "lastname", "address", "city", "state", "email",
                    "areacode", "phone", "btn btn-large btn-primary"
                ],
                [
                    "https://www.nationalpayday.com/", "first_name", "email",
                    "amount", "option", "submit"
                ],
                [
                    "http://mycharityshopping.com/", "fname", "lname",
                    "exampleInputEmail1", "pwd1", "pwd2", "checkbox"
                ],
                [
                    "http://mortgageloans101.com/index.php/mortgage-quote-form/",
                    "wpforms[fields][9]", "wpforms[fields][10]",
                    "wpforms[fields][18]", "wpforms[fields][14]",
                    "wpforms[fields][7]", "wpforms[fields][15]",
                    "wpforms[fields][0][first]", "wpforms[fields][0][last]",
                    "wpforms[fields][3]", "wpforms[fields][2]",
                    "wpforms-107-field_12_1", "wpforms[submit]"
                ],
                [
                    "http://kidsdineforfree.com/", "fname", "lname", "email",
                    "pwd1", "pwd2", "newsletter", "frmaction"
                ],
                [
                    "http://emortgagefinders.com/", "input_5", "input_6",
                    "input_50", "input_8", "input_10", "input_51",
                    "input_12.3", "input_12.6", "input_14",
                    "gform_next_button_6_1"
                ],
                [
                    "http://consumerofferstore.com/", "fname", "lname",
                    "email", "contact", "state", "city", "country", "checkbox",
                    "checkbox1", "a-b3xqfy75bf3j", "Submit"
                ]
            ]

            #list of fake American identities
            Identities = [
                [
                    "Mary", "M.Pfister", "NewYork", "Huntington",
                    "4662 Duncan Avenue", "+1 610-934-1119", "11743"
                ],
                [
                    "Raymond", "M.Gamboa", "Kentucky", "Owensboro",
                    "4072 Coffman Alley", "+1 270-691-3671", "42301"
                ],
                [
                    "Pamela", "K.Smith", "Georgia", "Atlanta",
                    "1707 Musgrave Street", "+1 404-934-8171", "30303"
                ],
                [
                    "Nadine", "B.Lowe", "Arizona", "Superstition",
                    "423 East Avenue", "+1 480-358-3654", "85207"
                ],
                [
                    "Oscar", "L.Merrill", "Georgia", "Atlanta",
                    "411 Pine Garden Lane", "+1 770-741-7993", "30305"
                ],
                [
                    "Theresa", "K.Johnson", "Florida", "Sunrise",
                    "1116 Ridenour Street", "+1 786-306-3113", "33323"
                ],
                [
                    "Theodore", "J.Mejia", "Georgia", "Atlanta",
                    "2207 Edington Drive", "+1 678-799-9599", "30303"
                ],
                [
                    "Kenneth", "E.Pabon", "Maryland", "Sykesville",
                    "15 Woodhill Avenue", "+1 410-795-2288", "21784"
                ],
                [
                    "Juanita", "J.Perry", "Iowa", "Des Moines",
                    "4372 Southern Avenue", "+1 641-328-8365", "50309"
                ],
                [
                    "Pamela", "J.Chancellor", "Iowa", "Westside",
                    "2497 Centennial Farm Road", "+1 712-663-4676", "51467"
                ],
                [
                    "Mack", "P.King", "California", "Burbank",
                    "2181 Quiet Valley Lane", "+1 818-972-1068", "91502"
                ],
                [
                    "Chris", "M.Bibb", "Ohio", "Dayton", "1580 College Avenue",
                    "+1 937-434-9264", "45459"
                ],
                [
                    "Dorothy", "J.Honeycutt", "New Jersey", "Camden",
                    "939 Valley Street", "+1 856-885-6555", "08102"
                ],
                [
                    "Scott", "E.Brown", "California", "Bakersfield",
                    "179 Atha Drive", "+1 661-586-6085", "93304"
                ],
                [
                    "Barry", "L.Murchison", "Kentucky", "Pleasant Ridge",
                    "2210 Broaddus Avenue", "+1 270-275-3710", "40769"
                ],
                [
                    "Maye", "L.Moseley", "Michigan", "Grand Rapids",
                    "916 Goff Avenue", "+1 269-589-1746", "49503"
                ],
                [
                    "Jerry", "Y.Winn", "Tennessee", "Portland",
                    "422 Frum Street", "+1 615-325-8391", "37148"
                ],
                [
                    "Andrew", "N.Jones", "Ohio", "Cincinnati",
                    "2576 Goldie Lane", "+1 513-374-9889", "45214"
                ],
                [
                    "Timothy", "B.Frye", "California", "Sherman Oaks",
                    "3789 Par Drive", "+1 805-808-3371", "91403"
                ],
                [
                    "Kevin", "D.Carrillo", "Alabama", "Opelika",
                    "1774 Fleming Street", "+1 334-364-1184", "36801"
                ]
            ]
            #["fname","lname","state","city","adress","phone number"]

            #list of e-mails
            emails = [
                1, 2, 3, 4, 5, 6, 7, 8, 9, 7, 8, 9, 7, 8, 9, 7, 8, 9, 7, 8, 9,
                7, 8, 9, 7, 8, 9
            ]

            #list of proxies
            PROXIES = [
                '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
                '0', '0', '0', '0', '0', '0', '0', '0'
            ]
            req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
            proxies = req_proxy.get_proxy_list()  # this creates the proxy list
            for i in range(20):
                PROXIES[i] = proxies[i].get_address()

            count = Emails.objects.all().count()
            emails[0] = Emails.objects.first().Email
            k = Emails.objects.first().pk
            k = k + 1
            for j in range(1, count - 1):
                if Emails.objects.get(pk=k):

                    emails[j] = Emails.objects.get(pk=k).Email
                    k = k + 1
                else:
                    k = k + 1

            #Registration in 1st website
            for email in emails:
                proxy = random.choice(PROXIES)
                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.maximize_window()
                browser.get(URLs[0][0])
                time.sleep(2)
                element = browser.find_element_by_name(URLs[0][1])
                element.send_keys(email)
                element = browser.find_element_by_class_name(URLs[0][2])
                element.click()
                window_after = browser.window_handles[1]
                browser.switch_to.window(window_after)
                time.sleep(5)
                element = browser.find_element_by_name(URLs[0][3])
                element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[0][4])
                element.send_keys(Identities[i][1])
                browser.find_element_by_xpath(
                    "//select[@name='dob_month']/option[text()='December']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='dob_day']/option[text()='1']").click()
                browser.find_element_by_xpath(
                    "//select[@name='dob_year']/option[text()='2000']").click(
                    )
                element = browser.find_element_by_name(URLs[0][8])
                element.send_keys(Identities[i][4])
                element = browser.find_element_by_name(URLs[0][9])
                element.send_keys(Identities[i][3])
                element = browser.find_element_by_name(URLs[0][10])
                element.send_keys(Identities[i][2])
                element = browser.find_element_by_name(URLs[0][11])
                element.send_keys(Identities[i][6])
                element = browser.find_element_by_name(URLs[0][12])
                element.send_keys(Identities[i][5])
                element = browser.find_element_by_name(URLs[0][13])
                element.click()
                element = browser.find_element_by_name(URLs[0][14])
                element.click()
                element = browser.find_element_by_name(URLs[0][15])
                element.click()
                element = browser.find_element_by_name(URLs[0][16])
                element.click()
                browser.find_element_by_xpath(
                    "//input[@name='offers4' and @value=5]").click()
                element = browser.find_element_by_name(URLs[0][18])
                element.click()
                browser.close()

            #Registration in 2nd website
            for email in emails:
                proxy = random.choice(PROXIES)
                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.get(URLs[1][0])
                time.sleep(2)
                element = browser.find_element_by_name(URLs[1][1])
                element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[1][2])
                element.send_keys(Identities[i][1])
                element = browser.find_element_by_name(URLs[1][3])
                element.send_keys(Identities[i][4])
                element = browser.find_element_by_name(URLs[1][4])
                element.send_keys(Identities[i][3])
                element = browser.find_element_by_name(URLs[1][5])
                element.send_keys(Identities[i][2])
                element = browser.find_element_by_name(URLs[1][6])
                element.send_keys(email)
                element = browser.find_element_by_name(URLs[1][7])
                element.send_keys('907')
                element = browser.find_element_by_name(URLs[1][8])
                element.send_keys(Identities[i][5])
                element = browser.find_element_by_class_name(URLs[1][9])
                element.click()
                browser.close()

            #Registration in 3rd website
            for email in emails:
                proxy = random.choice(PROXIES)
                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.get(URLs[2][0])
                time.sleep(2)
                element = browser.find_element_by_name(URLs[2][1])
                element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[2][2])
                element.send_keys(email)
                browser.find_element_by_xpath(
                    "//select[@name='amount']/option[text()='$600']").click()
                element = browser.find_element_by_name(URLs[2][5])
                element.click()
                browser.close()

            #Registration in 4th website
            for email in emails:
                proxy = random.choice(PROXIES)

                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.get(URLs[3][0])
                time.sleep(2)
                element = browser.find_element_by_name(URLs[3][1])
                #element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[3][2])
                element.send_keys(Identities[i][1])
                element = browser.find_element_by_id(URLs[3][3])
                element.send_keys(email)
                element = browser.find_element_by_name(URLs[3][4])
                element.send_keys("password")
                element = browser.find_element_by_name(URLs[3][5])
                element.send_keys("password")
                element = browser.find_element_by_class_name(URLs[3][6])
                element.click()
                element = browser.find_element_by_xpath(
                    "//button[@type='submit' and @class='btn btn-orange']")
                element.click()
                browser.close()

            #Registration in 5th website
            for email in emails:
                proxy = random.choice(PROXIES)

                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.get(URLs[4][0])
                time.sleep(2)
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][9]']/option[text()='Refinance']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][10]']/option[text()='Condo']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][18]']/option[text()='Alabama']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][14]']/option[text()='$50,000 - $60,000']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][7]']/option[text()='Excellent (800+)']"
                ).click()
                browser.find_element_by_xpath(
                    "//select[@name='wpforms[fields][15]']/option[text()='Yes']"
                ).click()
                element = browser.find_element_by_name(URLs[4][7])
                element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[4][8])
                element.send_keys(Identities[i][1])
                element = browser.find_element_by_name(URLs[4][9])
                element.send_keys(Identities[i][5])
                element = browser.find_element_by_name(URLs[4][10])
                element.send_keys(email)
                element = browser.find_element_by_xpath(
                    "//input[@type='checkbox' and @id='wpforms-107-field_12_1']"
                )
                element.click()
                #element = browser.find_element_by_id(URLs[4][11])
                #element.click()
                element = browser.find_element_by_name(URLs[4][12])
                element.click()
                browser.close()

            #Registration in 6th website
            for email in emails:
                proxy = random.choice(PROXIES)

                webdriver.DesiredCapabilities.CHROME['proxy'] = {
                    "httpProxy": proxy,
                    "ftpProxy": proxy,
                    "sslProxy": proxy,
                    "proxyType": "MANUAL",
                }
                i = random.randint(0, len(Identities) - 1)
                browser = webdriver.Chrome(path)
                browser.get(URLs[5][0])
                time.sleep(2)
                element = browser.find_element_by_name(URLs[5][1])
                element.send_keys(Identities[i][0])
                element = browser.find_element_by_name(URLs[5][2])
                element.send_keys(Identities[i][1])
                element = browser.find_element_by_name(URLs[5][3])
                element.send_keys(email)
                element = browser.find_element_by_name(URLs[5][4])
                element.send_keys("password")
                element = browser.find_element_by_name(URLs[5][5])
                element.send_keys("password")
                element = browser.find_element_by_name(URLs[5][6])
                element.click()
                element = browser.find_element_by_name(URLs[5][7])
                element.click()
                browser.close()

        return render(request, 'Home/Home.html')
Example #22
#!/usr/bin/python
# -*- coding: utf-8 -*-
#pip install http-request-randomizer
#####################################
##KILL THE NET##
##############[LIBS]###################
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
import sys
while 1:
    try:
        ip = sys.argv[1]
        api = 'http://api.hackertarget.com/reverseiplookup/?q='+ip
        req_proxy = RequestProxy()
        try:
            request = req_proxy.generate_proxied_request(api)
            if request:
                if 'error' in request.text or 'No DNS' in request.text:
                    break
                if 'API count exceeded' in request.text or 'Bad Request' in request.text:
                    continue
                else:
                    open(ip+'.txt','a').write(request.text+'\n')
                    open('ALL-SITES.txt','a').write(request.text+'\n')
                    break
        except Exception:
            pass
    except Exception as e:
        print(e)
        break
class YFHistoricalDataExtract(object):
    """
    Function for grabbing historical stock data from yahoo finance.  Utilizes
    the HTTP_Request_Randomizer library to make proxied function calls so as to
    avoid IPbans from relevant sources.

    <More Info Here!!!>
    """
    def __init__(self,
                 stock_file,
                 data_storage_dir="./historical_stock_data",
                 threads=10,
                 clear_existing=True):
        """
        Initializes the proxy server as well as directories that all of
        the read in historical data will be stored to.

        Note: The directory structure could already exist and the data could already be there.
        It does not always make sense to delete the old data and start again.  If the clear_existing
        variable is set, clear the existing directories.  The default is to clear the existing
        directories containing historical data and start over.
        """

        self.proxy_server = RequestProxy()
        self.output_dir = data_storage_dir
        self.ticker_file = stock_file
        self.thread_limit = threads

        # If the user asks for it, clear the existing directory structure
        if clear_existing is True:
            self.clear_directories()

        # Check to see if the file containing ticker symbols exists
        if not os.path.exists(stock_file):
            raise BadTickerFile()

        # Try to make the directory structure that the data will be stored in
        self.setup_directories()

        try:
            os.makedirs("%s/dividends" % self.output_dir)
        except OSError:
            print "[Error]: Could not create directory structure."
            raise CannotCreateDirectory()

    def clear_directories(self):
        """
        Wipe the existing directory structure if it exists.
        """

        os.system("rm -rf %s" % self.output_dir)

    def setup_directories(self):
        if not os.path.exists(self.output_dir):
            try:
                os.makedirs(self.output_dir)
            except OSError as e:
                print "[ERROR]: %s" % str(e)
                raise CannotCreateDirectory()

        if not os.path.exists(self.output_dir + "/dividend_history"):
            try:
                os.makedirs(self.output_dir + "/dividend_history")
            except OSError as e:
                print("[ERROR]: %s" % str(e))
                raise CannotCreateDirectory()

    def get_historical_data(self):
        stock_file = open(self.ticker_file, "r")

        candidates_to_test = []

        pool = ThreadPool(self.thread_limit)

        for ticker in stock_file.readlines():
            candidates_to_test.append(ticker.strip())

        pool.map(self.read_ticker_historical, candidates_to_test)

    def read_ticker_historical(self, ticker_symbol):
        URL = "https://finance.yahoo.com/quote/%s/history/" % ticker_symbol
        response = None

        # Loop until you get a valid response
        while True:
            try:
                response = self.proxy_server.generate_proxied_request(
                    URL, req_timeout=5)
            except Exception as e:
                print "Exception: %s %s" % (ticker_symbol, str(e))
                return

            if response is None:
                continue

            if response.status_code == 200:
                break

        response_soup = BeautifulSoup(response.text, 'html5lib')

        # Find all rows in the historical data.
        response_soup = response_soup.find_all("tr")
        response_soup = response_soup[2:]

        json_history_file = open(
            "%s/%s.json" % (self.output_dir, ticker_symbol), "w")
        json_dividend_file = open(
            "%s/%s_dividend.json" %
            (self.output_dir + "/dividend_history", ticker_symbol), "w")

        historical_data = {
            'Date': [],
            'Open': [],
            'High': [],
            'Low': [],
            'Close': [],
            'Adj Close': [],
            'Volume': []
        }

        dividend_data = {'Date': [], 'Amount': []}

        for response in response_soup:
            filtered_response = response.find_all("td")

            if len(filtered_response) == 7:

                # Date
                historical_data["Date"].append(filtered_response[0].text)

                # Open
                historical_data["Open"].append(filtered_response[1].text)

                # High
                historical_data["High"].append(filtered_response[2].text)

                # Low
                historical_data["Low"].append(filtered_response[3].text)

                # Close
                historical_data["Close"].append(filtered_response[4].text)

                # Adj Close
                historical_data["Adj Close"].append(filtered_response[5].text)

                # Volume
                historical_data["Volume"].append(filtered_response[6].text)
            elif len(filtered_response) == 2:

                # Date
                dividend_data["Date"].append(filtered_response[0].text)

                # Dividend Amount
                amount = filtered_response[1].text.replace(" Dividend", "")
                dividend_data["Amount"].append(amount)
            else:
                continue

        json_history_file.write(json.dumps(historical_data))
        json_dividend_file.write(json.dumps(dividend_data))

        json_history_file.close()
        json_dividend_file.close()
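A hedged driver sketch for the class above (the ticker file name is illustrative and is expected to contain one symbol per line):

extractor = YFHistoricalDataExtract('tickers.txt',
                                    data_storage_dir='./historical_stock_data',
                                    threads=5,
                                    clear_existing=False)
extractor.get_historical_data()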
Example #24
 def get_search_links(self, prodname):
     url = 'https://www.google.fr/search?q=reference%20"' + "%20".join(
         prodname.split(" ")) + '"'
     urllist = []
     headers = {
         'User-Agent':
         'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
     }
     try:
         page = requests.get(url, headers=headers)
         soup = BeautifulSoup(page.content)
         for link in soup.find_all("a"):
             if (link.has_attr('href')):
                 if ("https://" in link['href']
                         and "webcache" not in link['href']
                         and "google." not in link['href']
                         and "youtube." not in link['href']):
                     templink = link['href'].split("&")[0]
                     if ("https:" in templink):
                         urllist.append("http" + templink.split("http")[1])
         if (len(urllist) == 0):
             itr = 0
             while itr < 5:
                 try:
                     req_proxy = RequestProxy()
                     request = req_proxy.generate_proxied_request(url)
                     if (request is not None
                             and request.status_code == 200):
                         soup = BeautifulSoup(request.content)
                         for link in soup.find_all("a"):
                             if (link.has_attr('href')):
                                 if ("https://" in link['href']
                                         and "webcache" not in link['href']
                                         and "google." not in link['href']
                                         and "youtube."
                                         not in link['href']):
                                     templink = link['href'].split("&")[0]
                                     if ("https:" in templink):
                                         urllist.append(
                                             "http" +
                                             templink.split("http")[1])
                         if (len(urllist) > 0):
                             itr = 6
                             break
                         else:
                             itr = itr + 1
                 except:
                     itr = itr + 1
                     continue
             if (len(urllist) == 0):
                 urllist = list(
                     search(query="%20".join(prodname.split(" ")),
                            tld="fr",
                            lang="fr",
                            num=10,
                            start=1,
                            stop=20))
         self.logger.info("Number of sites found:" + str(len(urllist)))
     except Exception as e:
         self.logger.info("Error:" + str(e))
         self.logger.info("Failed prod:" + prodname)
     return (urllist)
Example #25
else:
    query = (artistName + spaceInput + songName + spaceInput + mixName)
    text = (codeOpen + artistName + spaceInput + hyphenInput + spaceInput +
            songName + '(' + mixName + ' Remix' + ')' + codeClose)
    newName = ('/storage/emulated/0/temp/' + artistName + hyphenInput +
               songName + hyphenInput + mixName + '.mp3')

print('\nWorking on Request: ' + query)

# baseURL

url = 'https://mp3cc.biz/search/f/' + query + '/'

# proxy_headersRequest

req_proxy = RequestProxy()

while not req_proxy.generate_proxied_request(url):
    print('\nNext proxy for "Base URL"')
else:
    print('\nConnected to "Base URL!"')
    pass

# saveToFile

with open('parse.txt', 'wb') as f:
    response = requests.get(url)
    f.write(response.content)

# parseFromFile
import os
import re
from multiprocessing.dummy import Pool as ThreadPool

import requests
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy

red = '\033[91m'
green = '\033[92m'
white = '\033[00m'

req_proxy = RequestProxy()
os.system('cls' if os.name == 'nt' else 'clear')
logo = '''  
______                               ___________   _                 _                
| ___ \                             |_   _| ___ \ | |               | |               
| |_/ /_____   _____ _ __ ___  ___    | | | |_/ / | |     ___   ___ | | ___   _ _ __  
|    // _ \ \ / / _ \ '__/ __|/ _ \   | | |  __/  | |    / _ \ / _ \| |/ / | | | '_ \ 
| |\ \  __/\ V /  __/ |  \__ \  __/  _| |_| |     | |___| (_) | (_) |   <| |_| | |_) |
\_| \_\___| \_/ \___|_|  |___/\___|  \___/\_|     \_____/\___/ \___/|_|\_\\__,_| .__/ 
                                                                               | |    
Mister Spy Tool View Dns Unlimited 

'''

print(red + logo + white)


def taz(i):
    try:
        i = i.replace('\n', '').replace('\r', '')
Example #27
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
from webdriver_manager.chrome import ChromeDriverManager
import time
from time import sleep
import random

accNo = int(input("No of Accounts: "))

print("Requesting Proxies:")

req_proxy = RequestProxy()
proxies = req_proxy.get_proxy_list()
maxNum = len(proxies)
print("\n \n Max No of Proxies:" + str(maxNum) + "\n \n")

i = 0

while accNo > i:
    k = random.randint(0, maxNum - 1)
    l = random.randint(0, maxNum - 1)
    PROXY = proxies[k].get_address()
    PROXY_PLACE = proxies[k].country
    first_name = (random.choice(open("Fnames.txt").read().split()))
    last_name = (random.choice(open("Lnames.txt").read().split()))
    full_name = (first_name + ' ' + last_name)
    username = (first_name + last_name + '.' + str(random.randint(1, 100)) +
Example #28
 def __init__(self):
     self.req_proxy = RequestProxy()
Example #29
    webdriver.DesiredCapabilities.CHROME['proxy'] = {
        "httpProxy": PROXY,
        "ftpProxy": PROXY,
        "sslProxy": PROXY,
        "proxyType": "MANUAL",
        'trustAllServers': 'true',
    }
    browser = webdriver.Chrome(options=chrome_options)
    return browser

#logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
#from selenium.webdriver.remote.remote_connection import LOGGER
#LOGGER.setLevel(logging.WARNING)

req_proxy = RequestProxy()  # you may get a different number of proxies each time you run this
proxies = req_proxy.get_proxy_list()  # this creates the proxy list
sp = []  # sp holds only the proxies located in Spain
for proxy in proxies:
    if proxy.country == 'Spain':
        sp.append(proxy)
proxies = sp


browser_categories=["popular","blocks","featured","beta"]
data=get_all_popular_plugins()
json_data=json.dumps(data)

current_date_and_time = datetime.datetime.now()
current_date_and_time_string = str(current_date_and_time)
extension = ".json"
                                                                                                            
                                                                                                            
"""

print(colored(logo, 'magenta'))
print(colored('Mass Account Creator ! Beta v0.2', 'magenta'))
print('')
print('')

fichier = open("log.txt", "a")
fichier.close()
accNo = int(
    input(colored(current_time + " " + "How many accounts ? : ", 'magenta')))

print(colored(current_time + " " + "[-]Requesting Proxies:", 'blue'))
req_proxy = RequestProxy()
req_proxy.set_logger_level(50)
proxies = req_proxy.get_proxy_list()
maxNum = len(proxies)
print(colored(current_time + " " + "[+]Proxies Requested !", 'green'))
print(
    colored(current_time + " " + " [+] Max Proxies:" + str(maxNum) + "\n \n",
            'green'))

i = 0

while accNo > i:
    k = random.randint(0, maxNum - 1)
    l = random.randint(0, maxNum - 1)
    PROXY = proxies[k].get_address()
    PROXY_PLACE = proxies[k].country