Example #1
class AuM(object):
    def __init__(self):
        # Create a session and authenticate
        self._s = Session(
            webdriver_path='/usr/lib/chromium-browser/chromedriver',
            browser='chrome',
            webdriver_options={"arguments": ["--headless"]})
        self._s.headers.update({
            'User-Agent':
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:63.0) Gecko/20100101 Firefox/63.0'
        })
        self._s.get('https://www.adopteunmec.com')  # Maybe not needed
        # Register a new account
        rand_s = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(6))
        print('email used: francois_%s@*****.**' % rand_s)
        r = self._s.post('https://www.adopteunmec.com/register/index',
                         data={
                             'sex': '1',
                             'day': '03',
                             'month': '4',
                             'year': '1997',
                              'email': 'francois_%s@*****.**' % rand_s,
                             'password': '******',
                             'password_check': 'Adottami1',
                             'country': 'fr',
                             'zipcode': '06000',
                             'city': 'Nice',
                             'confirm_city': '0',
                             'pseudo': 'RedoAA',
                             'cgu': '1',
                             'reg_submit': '',
                             'by_popup': '1',
                             'PreventChromeAutocomplete': ''
                         },
                         headers={
                             "X-Requested-With": "XMLHttpRequest",
                             "Origin": "https://www.adopteunmec.com/",
                             "Referer": "https://www.adopteunmec.com/"
                         })
        status = r.json()
        # If registration was successful, follow the redirect to confirm the account
        if status['success'] == 1:
            self._s.get(status['redirect'])
        else:
            print('Something went wrong...')

        self._common_names = (
            'loic', 'marc', 'anthony', 'tom', 'jordan', 'florian', 'jean',
            'manu', 'seb', 'alex', 'lilian', 'angelo', 'fred', 'valent',
            'fabrice', 'fabien', 'nico', 'thomas', 'sylvain', 'tim', 'karim',
            'robin', 'pierre', 'arnaud', 'max', 'luc', 'mike', 'yann', 'oliv',
            'yvan', 'jerem', 'michel', 'mat', 'kev', 'damien', 'vinc', 'eric',
            'gilles', 'jona', 'bruno', 'simon', 'adri', 'serge', 'tony', 'jul',
            'quentin', 'leo', 'step', 'gab', 'david', 'paul', 'killian',
            'alvaro', 'ronan', 'anto', 'jb', 'jp', 'jon', 'patrick', 'virgile',
            'juju', 'stef', 'franck', 'alan', 'alain', 'albin', 'alban',
            'fran', 'cyril', 'laure', 'phil', 'jacques', 'jack', 'ludo',
            'chris', 'vic', 'jo', 'charles', 'geoffrey', 'igor', 'ciro',
            'erwan', 'fabio', 'guillaume', 'thibaut', 'romain', 'rafa',
            'lionel', 'cedric', 'xavier')

    def _common_name(self, name):
        # True if any common first name appears as a substring of `name`
        return any(n in name.lower() for n in self._common_names)

    def search_by_region(self, age_min=20, age_max=30, region=1, sex=0):
        return self.search({
            'age[min]': age_min,
            'age[max]': age_max,
            'by': 'region',
            'region': region,
            "sex": sex
        })

    def search_by_distance(self, age_min=20, age_max=30, distance=40, sex=0):
        return self.search({
            'age[min]': age_min,
            'age[max]': age_max,
            'by': 'distance',
            'distance[max]': distance,
            "sex": sex
        })

    def search(self, criteria=None):
        if criteria is None:
            return []

        # Go to search page
        self._s.get('https://www.adopteunmec.com/mySearch')
        # POST a request
        r = self._s.post('https://www.adopteunmec.com/mySearch/save',
                         data=criteria)

        time.sleep(3)  # Wait a bit...
        # Transfer cookies to Selenium, refresh the page, scroll to the end 10 times, and get profiles
        self._s.transfer_session_cookies_to_driver()
        self._s.driver.get('https://www.adopteunmec.com/mySearch/results')
        for i in range(10):
            self._s.driver.find_element_by_tag_name('html').send_keys(Keys.END)
            time.sleep(.1)
        html = BeautifulSoup(
            self._s.driver.execute_script("return document.body.innerHTML"),
            'lxml')
        self._s.transfer_driver_cookies_to_session()
        self._s.driver.close()  # Might be done before ?

        # Look for <div> tags containing user info
        blocks = html.find_all('div', {'class': 'user-infos'})
        # Get all <a> tags in a same list
        all_a = [a for sl in [b.find_all('a') for b in blocks] for a in sl]
        # Extract profile IDs, filtering out common names to avoid visiting too many profiles later
        profiles = [
            l.get('href').split('/')[-1] for l in all_a
            if isinstance(l.get('href'), str)
            and l.get('href').find('profile') > 0 and len(l.get_text()) > 2
            and not self._common_name(l.get_text())
        ]
        return profiles

    def update_db(self,
                  profiles=(),
                  max_p=None,
                  filename='data/justemenemoi.json'):
        db = {}
        try:
            with open(filename, 'r') as in_f:
                db = json.load(in_f)
        except (IOError, ValueError):
            # No existing database yet, or unreadable JSON: start fresh
            pass

        visited = 0
        for uid in profiles:
            # Check if profile already in db
            if uid not in db:
                if max_p is not None and visited >= max_p:
                    break
                visited += 1

                url = "https://www.adopteunmec.com/profile/" + uid
                page = self._s.get(url)
                html = BeautifulSoup(
                    page.content.decode('utf-8', 'xmlcharrefreplace'), 'lxml')

                name = html.find('div', {'class': 'username'}).get_text()
                desc = html.find(text='Description').find_parent('div').find(
                    'p').get_text()
                shop = html.find(text='Shopping List').find_parent('div').find(
                    'p').get_text()
                # Profile Filtering
                if desc.find("non renseign") >= 0 or shop.find(
                        "non renseign") >= 0 or len(desc) < 20 or len(
                            shop) < 20:
                    continue

                img_url = html.find(id='img-current-pic')['src']
                img_name = img_url.split('/')[-1]
                db[uid] = {
                    "profile": url,
                    "name": name,
                    "img": img_name,
                    "age": html.find('span', {
                        'class': 'age'
                    }).get_text(),
                    "city": html.find('span', {
                        'class': 'city'
                    }).get_text(),
                    "desc": desc,
                    "shop": shop
                }

                # Download and save profile pic
                pic = self._s.get(img_url, stream=True)
                pic.raw.decode_content = True
                with open("data/pics/" + img_name, 'wb') as f:
                    shutil.copyfileobj(pic.raw, f)

                time.sleep(.5)  # Bit of rest...

        # Write back json
        json_s = json.dumps(
            db)  # Dump as a string, to write to file and as JS var
        with open(filename, 'w') as out_f:
            out_f.write(json_s)
        with open(filename + '.js', 'w') as out_f:
            out_f.write("data = ")
            out_f.write(json_s)
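
# A minimal driver sketch for the AuM class above. The region code and the
# max_p limit are illustrative placeholders, not values taken from the original.
if __name__ == '__main__':
    bot = AuM()  # registers a throwaway account on construction
    found = bot.search_by_region(age_min=20, age_max=30, region=1)
    bot.update_db(found, max_p=20)  # visit at most 20 new profiles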
Example #2
class API:
    def __init__(self, path):
        self.last_json = ""
        self.last_response = None
        self.IG_SIG_KEY = '4f8732eb9ba7d1c8e8897a75d6474d4eb3f5279137431b2aafb71fafe2abe178'
        self.SIG_KEY_VERSION = '4'
        self.USER_AGENT = 'Instagram 10.26.0 Android ({android_version}/{android_release}; 640dpi; 1440x2560; {manufacturer}; {device}; {model}; samsungexynos8890; en_US)'.format(
            **DEVICE_SETTINTS)
        self.s = Session(webdriver_path=path,
                         browser='chrome',
                         default_timeout=15)
        self.logger = logging.getLogger('[instatesi_{}]'.format(id(self)))
        self.privateUsers = {}
        self.users = {}
        fh = logging.FileHandler(filename='instatesi.log')
        fh.setLevel(logging.INFO)
        fh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))

        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))

        self.logger.addHandler(fh)
        self.logger.addHandler(ch)
        self.logger.setLevel(logging.DEBUG)
        self.lastUserHandled = None

    def saveScrapedFollowers(self):
        self.logger.info("Saving the scraped followers...")
        if not os.path.exists(os.getcwd() + "/ScrapedFollowers/" +
                              self.lastUserHandled + ".txt"):
            with open(
                    os.getcwd() + "/ScrapedFollowers/" + self.lastUserHandled +
                    ".txt", "w") as f:
                """ f.write("Scraped following from " + self.lastUserHandled +"\n")
                f.write("-------Non private users-------\n")
                f.write(json.dumps(self.users[''], indent=2))
                f.write("\n-------Private users-------\n")
                f.write(json.dumps(self.privateUsers, indent=2)) """
                for k, v in self.users.items():
                    f.write(k + ',\n')
            self.logger.info("Following successfully saved!")
            self.users = dict()
            self.privateUsers = dict()

        else:
            self.logger.warning(
                "Warning! The user is already present in the database. Overwrite?"
            )
            # TODO: define logic for file overwriting

    def saveScrapedFollowing(self):
        self.logger.info("Saving the scraped following...")
        if not os.path.exists(os.getcwd() + "/ScrapedFollowing/" +
                              self.lastUserHandled + ".txt"):
            with open(
                    os.getcwd() + "/ScrapedFollowing/" + self.lastUserHandled +
                    ".txt", "w") as f:
                f.write("Scraped following from " + self.lastUserHandled +
                        "\n")
                f.write("-------Non private users-------\n")
                f.write(json.dumps(self.users, indent=2))
                f.write("\n-------Private users-------\n")
                f.write(json.dumps(self.privateUsers, indent=2))
            self.logger.info("Following successfully saved!")
            self.users = dict()
            self.privateUsers = dict()
        else:
            self.logger.warning(
                "Warning! The user is already present in the database. Overwrite?"
            )
            # TODO: define logic for file overwriting

    def getUserFollowers(self, userID, rank_token, selection="followers"):
        self.logger.info("User ID follower scraping started " + str(userID))
        followers = self.getTotalFollowers(userID,
                                           rank_token,
                                           fromInput=selection)
        return [str(item['username'])
                for item in followers][::-1] if followers else []

    def __getUsernameInfo(self, usernameId):
        return self.__send_request('users/' + str(usernameId) + '/info/')

    def __send_request_for_user_followers(self,
                                          user_id,
                                          rank_token,
                                          max_id='',
                                          selection="followers"):
        url = 'friendships/{user_id}/followers/?rank_token={rank_token}' if selection == "followers" else 'friendships/{user_id}/following/?max_id={max_id}&ig_sig_key_version={sig_key}&rank_token={rank_token}'
        url = url.format(
            user_id=user_id,
            rank_token=rank_token) if selection == "followers" else url.format(
                user_id=user_id,
                max_id=max_id,
                sig_key=self.SIG_KEY_VERSION,
                rank_token=rank_token)
        if max_id and selection == "followers":
            # the "following" URL already embeds max_id via its format string
            url += '&max_id={max_id}'.format(max_id=max_id)
        return self.__send_request(url)

    def searchUsername(self, username):
        url = 'users/{username}/usernameinfo/'.format(username=username)
        self.logger.info("Looking for user information " + username)
        return self.__send_request(url)

    def getUsernameFromID(self, user_id):
        url = 'users/{user_id}/info/'.format(user_id=user_id)
        self.__send_request(url)
        self.logger.info("Return the requested username, or " +
                         str(self.last_json['user']['username']))
        return self.last_json['user']['username']

    def __generateSignature(self, data, IG_SIG_KEY, SIG_KEY_VERSION):
        body = hmac.new(
            IG_SIG_KEY.encode('utf-8'), data.encode('utf-8'),
            hashlib.sha256).hexdigest() + '.' + urllib.parse.quote(data)
        signature = 'ig_sig_key_version={sig_key}&signed_body={body}'
        return signature.format(sig_key=SIG_KEY_VERSION, body=body)
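
    # Shape of the signed body built above (sketch; the hex digest is the
    # HMAC-SHA256 of the raw payload under IG_SIG_KEY, followed by the
    # URL-encoded payload itself):
    #   ig_sig_key_version=4&signed_body=<hmac_sha256_hexdigest>.<urlencoded_data>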

    def castUsernameToUserID(self, usernameToLook):
        self.lastUserHandled = usernameToLook
        userID = ""
        self.searchUsername(usernameToLook)
        if "user" in self.last_json:
            userID = str(self.last_json["user"]["pk"])
        self.logger.info("The username " + usernameToLook +
                         " corresponds to the ID " + userID)
        return userID

    def seeStories(self):
        self.__send_request("feed/reels_tray/")
        return self.last_json

    def getTotalFollowers(self, usernameId, rank_token, fromInput="followers"):
        sleep_track = 0
        followers = []
        next_max_id = ''
        self.__getUsernameInfo(usernameId)
        if "user" in self.last_json:
            total_followers = self.last_json["user"][
                'follower_count'] if fromInput == "followers" else self.last_json[
                    "user"]['following_count']
            if total_followers > 200000:
                self.logger.warning(
                    "There are over 200,000 followers. It may take a while.")
        else:
            return False
        with tqdm(total=total_followers,
                  desc="Retrieving followers",
                  leave=False) as pbar:
            while True:
                self.__send_request_for_user_followers(usernameId,
                                                       rank_token,
                                                       next_max_id,
                                                       selection=fromInput)
                temp = self.last_json
                try:
                    pbar.update(len(temp["users"]))
                    for item in temp["users"]:
                        if item['is_private']:
                            self.privateUsers[item['username']] = {
                                'ID': item['pk'],
                                'user_handle': item['username'],
                                'is_verified': item['is_verified'],
                                'is_private': item['is_private'],
                                'profile pic': item['profile_pic_url'],
                                'Full Name': item['full_name']
                            }
                        else:
                            self.users[item['username']] = {
                                'ID': item['pk'],
                                'user_handle': item['username'],
                                'is_private': item['is_private'],
                                'is_verified': item['is_verified'],
                                'profile pic': item['profile_pic_url'],
                                'Full Name': item['full_name']
                            }
                        followers.append(item)
                        sleep_track += 1
                        if sleep_track >= 20000:
                            import random
                            sleep_time = random.randint(120, 180)
                            self.logger.info("Waiting for " +
                                             str(float(sleep_time / 60)) +
                                             " due to excessive demands.")
                            time.sleep(sleep_time)
                            sleep_track = 0
                    if len(temp["users"]
                           ) == 0 or len(followers) >= total_followers:
                        self.logger.info(
                            "Returning account followers in the scraping phase, ie "
                            + str(len(followers[:total_followers])))
                        return followers[:total_followers]
                except Exception:
                    self.logger.error(
                        "Scraping interrupted by an error; returning %d followers" %
                        len(followers[:total_followers]))
                    return followers[:total_followers]
                if temp["big_list"] is False:
                    self.logger.info(
                        "Returning account followers in the scraping phase, ie "
                        + str(len(followers[:total_followers])))
                    return followers[:total_followers]
                next_max_id = temp["next_max_id"]

    def __send_request(self,
                       endpoint,
                       post=None,
                       login=False,
                       with_signature=True):
        self.s.headers.update({
            'Connection': 'close',
            'Accept': '*/*',
            'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Cookie2': '$Version=1',
            'Accept-Language': 'en-US',
            'User-Agent': self.USER_AGENT
        })
        try:
            if post is not None:  # POST
                if with_signature:
                    post = self.__generateSignature(post, self.IG_SIG_KEY,
                                                    self.SIG_KEY_VERSION)
                response = self.s.post('https://i.instagram.com/api/v1/' +
                                       endpoint,
                                       data=post)
            else:  # GET
                response = self.s.get('https://i.instagram.com/api/v1/' +
                                      endpoint)
        except Exception as e:
            self.logger.error("Exception due to endpoint " + endpoint)
            self.logger.error(e)
            return False
        if response.status_code == 200:
            self.logger.info("The request to the endpoint " + endpoint +
                             " has been successful")
            self.last_response = response
            self.last_json = json.loads(response.text)
            return True
        else:
            return False
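
# A minimal driver sketch for the API class above. The chromedriver path and the
# target username are placeholders; building rank_token from uuid4 is an
# assumption (this snippet never shows how rank_token is produced), and the
# session is assumed to be authenticated elsewhere.
import uuid

api = API('/path/to/chromedriver')                  # placeholder path
user_id = api.castUsernameToUserID('some_account')  # placeholder username
if user_id:
    rank_token = str(uuid.uuid4())                  # assumed rank_token format
    api.getUserFollowers(user_id, rank_token, selection="followers")
    api.saveScrapedFollowers()  # writes ScrapedFollowers/<username>.txt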
Example #3
class Monster():
	api_throttle_secs = 3

	def __init__( self ):
		self.verbose = False
		self._session = Session(
				webdriver_path=''
				,browser='chrome'
				,default_timeout=15
				,webdriver_options={
						'arguments' : [ 'headless' ]
					}
			)

	@sleep_and_retry
	@limits( calls=1, period=api_throttle_secs )
	def apply( self, job_link ):
		'''Apply to the job at the given job link for Monster.com.

		Args:
			job_link (str_or_SearchResult): the speed apply link for the job to apply to.

		Returns:
			bool: True if successful, False otherwise.
		'''
		if isinstance( job_link, SearchResult ):
			job_link = job_link.ApplyLink
		apply_result = self._session.get( job_link )
		if apply_result.status_code == 200:
			if apply_result.json()['success'] == True:
				return True
			elif self.verbose:
				print( job_link )
				print( apply_result.json() )
		return False

	def batchApply( self, job_links ):
		''' Apply to all jobs in the list of job links given
		
		Args:
			job_links (list_or_generator): List, tuple, or generator of job links
		
		Returns:
			jobs_applied_to (int): The number of jobs applied to successfully
		'''
		jobs_quantity = 0
		if not isinstance( job_links, types.GeneratorType ):
			jobs_quantity = len( job_links )
		progress_bar = tqdm(
			total=jobs_quantity
			,desc='Applying'
			,unit='Jobs' 
		)
		for job_link in job_links:
			if isinstance( job_links, types.GeneratorType ):
				progress_bar.total += 1
			if self.apply( job_link ):
				progress_bar.update( 1 )
		jobs_applied_to = progress_bar.n
		progress_bar.close()
		return jobs_applied_to

	@sleep_and_retry
	@limits( calls=1, period=api_throttle_secs )
	def login( self, email, password ):
		'''Login to the Monster.com job board site.

		Args:
			email (str): Email address for logging into Monster.com.
			password (str): Password corresponding to email address to
				login to Monster.com job board site.

		Returns:
			bool: True if successful, False otherwise.
		'''

		# GOTO LOGIN PAGE TO CHECK IF AVAILABLE & GET COOKIES
		login_page = self._session.get( SITE['login'] )
		if login_page.status_code != 200:
			raise Exception( 'ERROR: COULD NOT GET LOGIN PAGE FOR MONSTER.COM : ' + SITE['login'] )

		# BUILD FORM DATA
		login_data = {
			'AreCookiesEnabled'			:	True
			,'EmailAddress'				: 	email
			,'IsComingFromProtectedView':	False
			,'IsKeepMeLoggedInEnabled'	:	True
			,'Password'					:	password
			,'PersistLogin'				:	True
		}
		request_verification_token = \
			login_page.xpath('//input[@name="__RequestVerificationToken"]/@value').extract()[0]
		login_data.update( { '__RequestVerificationToken' : request_verification_token } )

		# LOGIN
		login_result = self._session.post( SITE['login'], data=login_data )
		if login_result.status_code == 200:
			return True
		else:
			return False

	@sleep_and_retry
	@limits( calls=1, period=api_throttle_secs )
	def getJobDetails( self, job_link ):
		''' Get dictionary of details of the job, such as title and description.

		Args:
			job_link (str or int): Either a url containing the job id in the format
				of jobid={}, such as the apply link or the job page link. Or, directly
				supply the job id if it is available.

		Returns:
			job_dict (dict): Dictionary of the job link, job title, company name,
				job address, and job description.
		'''
		job_link = str( job_link )
		if 'jobid' not in job_link:
			job_id = job_link
		else:
			job_id = parse.parse_qs( parse.urlparse( job_link ).query )['jobid'][0]
		job_url = SITE[ 'job' ].format( job_id )
		job_page = self._session.get( job_url )
		job_json = job_page.json()
		job_description = job_json[ 'jobDescription' ]
		job_title = job_json[ 'companyInfo' ][ 'companyHeader' ]
		company_name = job_json[ 'companyInfo' ][ 'name' ]
		job_address = job_json[ 'companyInfo' ][ 'jobLocation' ]
		job_dict = {
			'job_link'          :   job_link
            ,'job_title'        :   job_title
            ,'job_address'      :   job_address
            ,'company_name'     :   company_name
            ,'job_description'  :   job_description
		}
		return job_dict

	def search( self, quantity=25, filter_out_recruiting_agencies=True, **kwargs ):
		''' Search Monster.com with the given filters and yield job links.
		
		Args:
			quantity (int): The max number of results to return.
			kwargs (dict): Dictionary of filters, such as keywords, 
				type (full_time,part_time), and posteddaysago.
				
		Returns:
			SearchResult (namedtuple): generator of named tuples, each
				containing an ApplyLink and a DetailsLink. The ApplyLink,
				when followed, will apply for the job automatically. The 
				Details link will return json data about the job.
		'''
		search_url = SITE['search']['root']
		
		# HANDLE SPECIAL CASE OF JOB TYPE, WHICH MUST PRECEDE QUERY
		job_type_value = ''
		if 'type' in kwargs:
			job_type = kwargs['type']
			options = SITE['search']['type']['options']
			job_type_value = options[job_type] if job_type in options else ''
			kwargs.pop( 'type' )
		search_url = search_url.format(
			type=urllib.parse.quote_plus( job_type_value )
		)
			
		# FORMAT URL WITH REMAINING FILTERS
		for search_field, search_value in kwargs.items():
			if search_field in SITE['search']:
				if isinstance( SITE['search'][search_field], dict ):
					options = SITE['search'][search_field]['options']
					if search_value in options:
						options_value = options[search_value]
						search_url += '+' + urllib.parse.quote_plus( options_value )
				else:
					search_format = SITE['search'][search_field]
					search_url += \
						'&{0}'.format(search_format.format(urllib.parse.quote_plus(search_value)))

		@sleep_and_retry
		@limits( calls=1, period=self.api_throttle_secs )
		def getPage( page ):
			paged_search_url = search_url + '&page=' + str( page )
			search_page = self._session.get( paged_search_url )
			return search_page
		
		# GET AND PROCESS RETURNED JSON
		quantity_returned = 0
		page = 1
		while quantity_returned < quantity:
			search_page = getPage( page )
			if search_page.status_code != 200:
				break
			search_json = search_page.json()
			for app_dict in search_json:
				if all( key in app_dict for key in ( 'MusangKingId', 'ApplyType' ) ):
					if app_dict['MusangKingId'] != 0 and app_dict['ApplyType'] is not None:			# filter jobs that are missing data / poorly formatted
						if any( x in app_dict['ApplyType'] \
							for x in QUICK_APPLY_KEYWORDS ):							# filter to include quick apply jobs only
							if not any( x.lower() in app_dict['Company']['Name'].lower() \
								for x in RECRUITING_AGENCY_KEYWORDS ) or \
								not filter_out_recruiting_agencies:						# filter jobs from recruiting agencies
								job_id = app_dict['MusangKingId']
								apply_url = SITE['speedapply'].format( job_id )
								details_url = SITE['job'].format( job_id )
								search_result = SearchResult( apply_url, details_url )
								quantity_returned += 1
								yield search_result
					if quantity_returned >= quantity:
						break
			page += 1
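
# A minimal driver sketch for the Monster class above. The credentials are
# placeholders; the search filters follow the kwargs documented in search().
bot = Monster()
if bot.login('user@example.com', 'correct-horse'):  # placeholder credentials
    results = bot.search(quantity=10, keywords='python', type='full_time')
    print('Applied to', bot.batchApply(results), 'jobs')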
Example #4
def gen(num, limit):
    s = Session(webdriver_path='chromedriver.exe', browser='chrome')
    s.driver.get("https://privacy.com/login")
    time.sleep(3)
    s.driver.find_element_by_xpath(
        '//*[@id="steps"]/div/form/div[2]/label[1]/input').send_keys(
            config['username'])
    s.driver.find_element_by_xpath(
        '//*[@id="steps"]/div/form/div[2]/label[2]/input').send_keys(
            config['password'])
    time.sleep(1)
    s.driver.find_element_by_xpath(
        '//*[@id="steps"]/div/form/div[3]/button').click()
    time.sleep(2)
    s.transfer_driver_cookies_to_session()
    s.driver.quit()
    url1 = "https://privacy.com/api/v1/card"

    for i in range(int(num)):
        h1 = {
            'Accept':
            'application/json, text/plain, */*',
            'Accept-Encoding':
            'gzip, deflate, br',
            'Accept-Language':
            'en-US,en;q=0.9',
            'Authorization':
            'Bearer {}'.format(s.cookies['token']),
            'Connection':
            'keep-alive',
            'Content-Type':
            'application/json;charset=UTF-8',
            'Cookie':
            'sessionID={}; ETag="ps26i5unssI="; waitlist_cashback=%7B%22refByCode%22%3A%22favicon.ico%22%2C%22isPromotional%22%3Afalse%7D; landing_page=extension-rewards-landing; token={}'
            .format(s.cookies['sessionID'], s.cookies['token']),
            'Host':
            'privacy.com',
            'Origin':
            'https://privacy.com',
            'Pragma':
            'no-cache',
            'Referer':
            'https://privacy.com/home',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
        }

        pay = {
            "type": "MERCHANT_LOCKED",
            "spendLimitDuration": "MONTHLY",
            "memo": genName(),
            "meta": {
                "hostname": ""
            },
            "style": 'null',
            "spendLimit": int(limit),
            "reloadable": 'true'
        }

        r = s.post(url1, json=pay, headers=h1)

        if r.status_code == requests.codes.ok:
            card = r.json()['card']
            print("[{}] !~Created Card~!".format(card['cardID']))
            with open("cards.txt", "a+") as file:
                file.write("{}:{}/{}:{}\n".format(card['pan'],
                                                  card['expMonth'],
                                                  card['expYear'],
                                                  card['cvv']))
        else:
            print("Error Creating Card")
Example #5
class Charme(object):
    def __init__(self):
        # Create a session and authenticate
        self._s = Session(
            webdriver_path='/usr/lib/chromium-browser/chromedriver',
            browser='chrome')  #,
        #webdriver_options={"arguments": ["--headless"]})
        self._s.headers.update({
            'User-Agent':
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:63.0) Gecko/20100101 Firefox/63.0'
        })

        # Login
        r = self._s.post('https://www.adopteunmec.com/auth/login',
                         data={
                             'username': '******',
                             'password': '******'
                         })
        if not r.ok:
            raise RuntimeError('Something went wrong during login')
        time.sleep(2)

    def search_by_distance(self, age_min=20, age_max=30, distance=40, sex=1):
        return self.search({
            'age[min]': age_min,
            'age[max]': age_max,
            'by': 'distance',
            'distance[max]': distance,
            "sex": sex
        })

    def search(self, criteria=None):
        if criteria is None:
            return []

        # Go to search page
        self._s.get('https://www.adopteunmec.com/mySearch')
        time.sleep(1)
        # POST a request
        r = self._s.post('https://www.adopteunmec.com/mySearch/save',
                         data=criteria)

        time.sleep(3)  # Wait a bit...
        # Transfer cookies to Selenium, refresh the page, scroll to the end 10 times, and get profiles
        self._s.transfer_session_cookies_to_driver()
        self._s.driver.get('https://www.adopteunmec.com/mySearch/results')
        for i in range(10):
            self._s.driver.find_element_by_tag_name('html').send_keys(Keys.END)
            time.sleep(.1)
        html = BeautifulSoup(
            self._s.driver.execute_script("return document.body.innerHTML"),
            'lxml')
        self._s.transfer_driver_cookies_to_session()
        self._s.driver.close()  # Might be done before ?

        # Look for <div> tags containing user info
        blocks = html.find_all('div', {'class': 'user-infos'})
        # Get all <a> tags in a same list
        all_a = [a for sl in [b.find_all('a') for b in blocks] for a in sl]
        # Extract profile IDs from the links (no common-name filtering here, unlike AuM.search)
        profiles = [
            l.get('href').split('/')[-1] for l in all_a
            if isinstance(l.get('href'), str)
            and l.get('href').find('profile') > 0 and len(l.get_text()) > 2
        ]
        return profiles

    def charme(self, profiles=(), max_p=10, filename='data/charme.json'):
        db = {}
        try:
            with open(filename, 'r') as in_f:
                db = json.load(in_f)
        except (IOError, ValueError):
            # No existing database yet, or unreadable JSON: start fresh
            pass

        visited = 0
        for uid in profiles:
            # Check if profile already in db
            if uid not in db:
                if max_p is not None and visited >= max_p:
                    break
                visited += 1

                url = "https://www.adopteunmec.com/profile/" + uid
                print "Visiting", url
                page = self._s.get(url)
                html = BeautifulSoup(
                    page.content.decode('utf-8', 'xmlcharrefreplace'), 'lxml')

                img_url = html.find(id='img-current-pic')['src']
                img_name = img_url.split('/')[-1]
                date = datetime.datetime.now().strftime("%m-%d %H:%M")
                db[uid] = {
                    "profile": url,
                    "name": html.find('div', {'class': 'username'}).get_text(),
                    "img": img_name,
                    "age": html.find('span', {'class': 'age'}).get_text(),
                    "city": html.find('span', {'class': 'city'}).get_text(),
                    "desc": html.find(text='Description').find_parent('div')
                            .find('p').get_text(),
                    "shop": html.find(text='Shopping List').find_parent('div')
                            .find('p').get_text(),
                    "charmed": date
                }

                # Download and save profile pic
                pic = self._s.get(img_url, stream=True)
                pic.raw.decode_content = True
                with open("data/pics/" + img_name, 'wb') as f:
                    shutil.copyfileobj(pic.raw, f)

                time.sleep(20)  # Bit of rest...

                # Send a charme
                url = "https://www.adopteunmec.com/events/charm?id=" + uid
                r = self._s.get(url)
                if str(r.json()['member']['id']) != uid:
                    raise RuntimeError('Unexpected member id in charm response')

        # Write back json
        json_s = json.dumps(
            db)  # Dump as a string, to write to file and as JS var
        with open(filename, 'w') as out_f:
            out_f.write(json_s)
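
# A minimal driver sketch for the Charme class above, mirroring the AuM flow in
# Example #1; the max_p limit of 5 charms is an illustrative placeholder.
bot = Charme()  # logs in on construction (credentials are redacted above)
found = bot.search_by_distance(age_min=20, age_max=30, distance=40)
bot.charme(found, max_p=5, filename='data/charme.json')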