Example no. 1
import json
import os

from bs4 import BeautifulSoup
from scraper_api import ScraperAPIClient


def handler(event, context):
    class job:
        def __init__(self, title, summary):
            self.title = title
            self.summary = summary

    search_string = event['queryStringParameters']['search']
    location = event['queryStringParameters']['state']
    
    client = ScraperAPIClient(os.environ.get('API_KEY'))
    monster_url = f'https://www.monster.com/jobs/search?q={search_string}&where={location}&page=1'
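    # render=True asks ScraperAPI to execute JavaScript before returning the page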
    page = client.get(url=monster_url, render=True)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(class_='results-page')
    job_elems = results.find_all('div', class_='results-card')
    jobs = {
        "jobs": []
    }
    for job_elem in job_elems:
        title_elem = job_elem.find('div', class_='title-company-location')
        summary_elem = job_elem.find('div', class_='results-card-description')
        jobs['jobs'].append(job(title_elem.text.strip(), summary_elem.text.strip()))
    result = json.dumps(jobs, default=lambda x: x.__dict__)
    # result = jobs
    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': result
    }
Example no. 2
    def start_requests(self):
        secret = self.properties.properties['Scraper_secret']
        print("Properties:", self.properties.properties)
        scrape_url = "Scraper_Vomar_Scrape_url"
        client = ScraperAPIClient(secret)

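        # The key's fifth "_"-separated token is used as the request priority and the
        # value holds either "url" or "url;category".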
        for i in self.properties.properties.keys():
            if str(i).startswith(scrape_url):
                url = self.properties.properties[i]
                property_values = self.properties.properties[i].split(";")
                priority = i.split("_")[4]
                try:
                    category = property_values[1]
                except IndexError as e:
                    category = property_values[0].split("/")[-1].replace(
                        "?sort=relevancy+asc", "")

                print("Category:", category)

                print("Calling yield function for url: ", url)
                yield scrapy.Request(client.scrapyGet(url=url),
                                     self.parse,
                                     dont_filter=True,
                                     priority=int(priority),
                                     meta={'category': category})
Example no. 3
    def start_requests(self):
        secret = self.properties.properties['Scraper_secret']
        scrape_url = "Scraper_Deen_scrape_url_1"
        client = ScraperAPIClient(secret)
        url = self.properties.properties[scrape_url] + '?items=6000'
        print("URL is:", url)
        yield scrapy.Request(client.scrapyGet(url=url),
                             self.parse,
                             dont_filter=True)
Example no. 4
    def __init__(self, key: str, lang="en", period="", ua=""):
        assert key != ""
        self.client = ScraperAPIClient(key)
        self.user_agent = ua
        self.__texts = []
        self.__titles = []
        self.__links = []
        self.__results = []
        self.__lang = lang
        self.__period = period
        self.__exec_time = 0
Example no. 5
    @classmethod
    def get_proxies_from_scraper_api(cls, proxy_count=10):
        client = ScraperAPIClient('3af7d62e85b75e0271d32f245107a240')
        proxies = set()

        # Each ScraperAPI request is routed through a different proxy, so
        # httpbin.org/ip reports a new origin IP on (almost) every call.
        for _ in range(proxy_count):
            result = client.get(url='http://httpbin.org/ip').text
            json_data = json.loads(result)
            print(json_data)
            proxies.add(json_data["origin"])

        print(proxies)

        return proxies
Example no. 6
    def __init__(self, settings):
        if not settings.getbool('SCRAPERAPI_ENABLED', True):
            raise NotConfigured

        self.SCRAPERAPI_KEY = settings.get('SCRAPERAPI_KEY', '')
        self.SCRAPERAPI_RENDER = settings.get('SCRAPERAPI_RENDER', False)
        self.SCRAPERAPI_PREMIUM = settings.get('SCRAPERAPI_PREMIUM', False)
        self.SCRAPERAPI_COUNTRY_CODE = settings.get('SCRAPERAPI_COUNTRY_CODE',
                                                    '')

        self.SCRAPERAPI_CLIENT = None
        try:
            self.SCRAPERAPI_CLIENT = ScraperAPIClient(self.SCRAPERAPI_KEY)
        except Exception:
            raise NotConfigured
Example no. 7
    @classmethod
    def init_scraper(cls, api_key):
        """Initializes the shared ScraperAPI client."""
        cls.scraper = ScraperAPIClient(api_key)
Example no. 8
def check_setup():
    """Prints to the user if their setup is successful."""
    import os

    from eBayScraper.data_files.api_keys import api_keys
    from eBayScraper.data_files.queries import d  # assumed home of the queries dict checked below
    from scraper_api import ScraperAPIClient
    from termcolor import colored  # assuming termcolor provides the colored() used below

    all_good = True

    if api_keys == [] or d == {}:
        all_good = False

    # validate each API key by asking ScraperAPI for its account info
    for key in api_keys:
        try:
            ScraperAPIClient(key).account()
        except Exception:
            all_good = False
            break

    os.system('color')
    if all_good:
        print(colored("Setup is successful!", "green"))
    else:
        print(
            colored(
                "Make sure the list in data_files/api_keys.py and the dictionary in data_files/queries.py are not empty!",
                "red"))
Example no. 9
import logging

from scrapy.exceptions import NotConfigured
from scraper_api import ScraperAPIClient

log = logging.getLogger(__name__)  # assumed logger; the snippet uses "log" without showing its setup


class ScraperApiProxyMiddleware(object):
    def __init__(self, settings):
        if not settings.getbool('SCRAPERAPI_ENABLED', True):
            raise NotConfigured

        self.SCRAPERAPI_KEY = settings.get('SCRAPERAPI_KEY', '')
        self.SCRAPERAPI_RENDER = settings.get('SCRAPERAPI_RENDER', False)
        self.SCRAPERAPI_PREMIUM = settings.get('SCRAPERAPI_PREMIUM', False)
        self.SCRAPERAPI_COUNTRY_CODE = settings.get('SCRAPERAPI_COUNTRY_CODE',
                                                    '')

        self.SCRAPERAPI_CLIENT = None
        try:
            self.SCRAPERAPI_CLIENT = ScraperAPIClient(self.SCRAPERAPI_KEY)
        except Exception:
            raise NotConfigured

    @classmethod
    def from_crawler(cls, crawler):
        o = cls(crawler.settings)
        return o

    def process_request(self, request, spider):
        if 'api.scraperapi.com' not in request.url:
            log.info("Process request...")
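            # scrapyGet() rewrites the target URL into an api.scraperapi.com proxy URL;
            # the check above keeps already-proxied requests from being wrapped twice.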
            new_url = self.SCRAPERAPI_CLIENT.scrapyGet(
                url=request.url,
                render=self.SCRAPERAPI_RENDER,
                country_code=self.SCRAPERAPI_COUNTRY_CODE,
                premium=self.SCRAPERAPI_PREMIUM)

            log.info("New url: {}".format(new_url))
            return request.replace(url=new_url)
        return
Example no. 10
    def initialize_client():
        """Initializes the Client's data before starting up the scraping."""
        # account() returns usage info for a key, including its "requestCount" so far
        Client.data = [(key, ScraperAPIClient(key).account()["requestCount"])
                       for key in api_keys]
        Client.current_client, Client.requests = Client.current_client_and_requests()
        Client.requests_limit = 1000
Example no. 11
from random import randint
from time import sleep

from bs4 import BeautifulSoup
from scraper_api import ScraperAPIClient


def check_availability(URL, scraper_api_key):
    # To prevent from being blocked.
    sleep(randint(30, 40))
    client = ScraperAPIClient(scraper_api_key)
    result = client.get(URL)

    parsed_html = BeautifulSoup(result.content, 'html.parser')

    availability = parsed_html.select("#availability")

    if availability:

        availability = availability[0].getText().strip().splitlines()[0]

        if availability != "Currently unavailable.":
            return True

    return False
Example no. 12
import argparse
import json

import requests
from scraper_api import ScraperAPIClient


def main():
	parser = argparse.ArgumentParser(description='Parses command line arguments')
	parser.add_argument('--scraper_api_key', type=str, required=True)
	args = parser.parse_args()

	client = ScraperAPIClient(args.scraper_api_key)
	result = json.loads(client.get(url='http://httpbin.org/ip').text)
	print('Rotated proxy IP address = ' + result['origin'])

	urls = [
		client.scrapyGet(url='http://quotes.toscrape.com/page/1/'),
		client.scrapyGet(url='http://quotes.toscrape.com/page/2/'),
	]

	for url in urls:
		r = requests.get(url)
		# add parsing logic here
		print(r.status_code)
Example no. 13
    def start_requests(self):
        secret = self.properties.properties['Scraper_secret']
        print("Properties:", self.properties.properties)
        scrape_url = "Scraper_Dirk_Scrape_url"
        client = ScraperAPIClient(secret)

        for i in self.properties.properties.keys():
            if str(i).startswith(scrape_url):
                url = self.properties.properties[i]
                property_value = self.properties.properties[i].split("/")
                priority = i.split("_")[4]
                category = property_value[4]
                print("Calling yield function for url: ", url)
                yield scrapy.Request(client.scrapyGet(url=url),
                                     self.parse,
                                     dont_filter=True,
                                     priority=int(priority),
                                     meta={'category': category})
Example no. 14
    def __init__(self, api_key):
        assert api_key is not None

        self._api_key = api_key
        self._client = ScraperAPIClient(api_key)

        super(ScraperAPI, self).__init__()

        self._TIMEOUT = 60
        self._session = self._client
        self._session.proxies = {}
Example no. 15
    def start_requests(self):
        secret = self.properties.properties['Scraper_secret']
        print("Properties:", self.properties.properties)
        scrape_url = "Scraper_AH_scrape_url"
        client = ScraperAPIClient(secret)

        start_urls = list()
        priorities = list()
        for i in self.properties.properties.keys():
            if str(i).startswith(scrape_url):
                start_urls.append(self.properties.properties[i])
                priorities.append(i.split("_")[4])
        print("Key start urls are:", start_urls, "Priorities:", priorities)
        urls = set()
        for priority, url in zip(priorities, start_urls):
            page_url = url + '?page=' + str(100)
            urls.add(page_url)
            print("Calling yield function for url: ", page_url)
            yield scrapy.Request(
                client.scrapyGet(url=page_url),
                self.parse, dont_filter=True, priority=int(priority))
Example no. 16
    def getInfoInnvictus(self, apiKey='aba275ef5f8f713e086a1a0ab240dd5c'):
        """ Method to retrieve a dict given launching shoes on innvictus.com/lanzamientos """

        self.client = ScraperAPIClient(apiKey)

        result = self.client.get(url='https://www.innvictus.com/lanzamientos')
        assert result.status_code == 200, f"Status code {result.status_code}"

        soup = BeautifulSoup(result.content, 'html.parser')
        links = soup.find('body').find_all("script",
                                           attrs={'type': 'text/javascript'})
        s = links[3]  # links[3] element gives var products
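        # The product list is embedded in that script tag as a quoted JSON string; the
        # replace() calls below reshape it into a JSON array before parsing.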
        productVar = re.search(r'\'(.*?)\'', str(s))
        productDict = json.loads(
            productVar.group(0).replace("'{\"id",
                                        "[{\"id").replace("\"}'", "\"}]"))
        #print(productVar.group(0).replace("'{\"id","[{\"id").replace("\"}'","\"}]"))

        for tenis in productDict:
            tenis['url'] = "https://www.innvictus.com/p/{id}".format(
                id=tenis['id'])
        self.products = productDict
Example no. 17
def product_scraper(url):
    from scraper_api import ScraperAPIClient
    from bs4 import BeautifulSoup
    import json

    client = ScraperAPIClient('9aa1dbc863b8334850efccb9be3552f8')

    try:
        page = client.get(url=url, render=True)
    except Exception:
        result = {'status_code': 500, 'status': 'scraper api fatal error',
                  'elapsed_time': '', 'price': '', 'title': ''}
        return json.dumps(result)

    if page.status_code != 200:
        result = {'status_code': page.status_code, 'status': 'error',
                  'elapsed_time': int(page.elapsed.total_seconds()), 'price': '', 'title': ''}
        return json.dumps(result)

    else:
        soup = BeautifulSoup(page.content, 'html.parser')
        result_title = soup.findAll(
            "span", {"class": "pdp-mod-product-badge-title"})
        result_price = soup.findAll("div", {"class": "pdp-product-price"})

        if result_title and result_price:
            title = result_title[0].text
            price = float(result_price[0].find_next(
                'span').text.strip('RM').replace(',', ''))

            result = {'status_code': page.status_code, 'status': 'success', 'elapsed_time': int(
                page.elapsed.total_seconds()), 'title': title, 'price': price}
            return json.dumps(result)

        else:
            result = {'status_code': 500, 'status': 'blocked/nocontent',
                      'elapsed_time': int(page.elapsed.total_seconds()), 'title': '', 'price': ''}
            return json.dumps(result)
Example no. 18
def store_product_info(product_keys):
    for key in product_keys:
        url = gen_url(base_url, count, offset, page, store_id, key)
        try:
            client = ScraperAPIClient(settings.SCRAPER_API_KEY)
            result = client.get(url=url)
            if result.status_code == 200:
                res_json = result.json()
            elif result.status_code == 500:
                raise Exception("Request not successful, status: 500")
            elif result.status_code == 403:
                raise Exception("Plan max request exceeded, status: 403")
            elif result.status_code == 404:
                raise Exception("Request not found, status: 404")
            elif result.status_code == 410:
                raise Exception("Request gone or deleted, status: 410")
        except Exception as e:
            logger.error("failed to fetch product info, error: " + str(e))
            # res_json is undefined when the fetch failed, so skip the save step
            continue

        try:
            store_to_db(res_json["products"])
        except Exception as e:
            logger.error("failed to save product info, error: " + str(e))
Example no. 19
async def graph(request):
    scraper_api_key = getenv("SCRAPER_API_KEY")  # getenv() already returns None when unset

    # Example url where pair graph can be fetched
    # url = 'https://gov.capital/forex/usd-eur/'
    pair = 'usd-eur'
    url = 'https://gov.capital/forex/{}/'.format(pair)

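    # render=True has ScraperAPI execute the page's JavaScript before returning the HTML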
    client = ScraperAPIClient(scraper_api_key)
    request_result = client.get(url, render=True).text
    soup = BeautifulSoup(request_result, 'html.parser')

    # Removing all divs containing ads
    for ads in soup.find_all("div", {"class": "code-block code-block-2"}):
        # Removes all ads in fetched page source
        ads.decompose()

    cleaned_graph_html = '<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha256-4+XzXVhsDmqanXGHaHvgh1gMQKX40OUvDEBTu8JcmNs=" crossorigin="anonymous"></script>\n\
<script src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.4.0/Chart.min.js"></script>'

    cleaned_graph_html = cleaned_graph_html + soup.find('canvas').next.__str__()

    return html(cleaned_graph_html)
Example no. 20
import json
import os

from bs4 import BeautifulSoup
from scraper_api import ScraperAPIClient


def handler(event, context):

    client = ScraperAPIClient(os.environ.get('API_KEY'))
    URL = "https://www.indeed.com/jobs?q=Entry+Level+Software+Engineer&l=Remote"
    page = client.get(url=URL)
    print(page)
    soup = BeautifulSoup(page.content, 'html.parser')
    print(soup)
    results = soup.find(id='resultsCol')
    print(results)
    job_elems = results.find_all('div', class_='jobsearch-SerpJobCard')
    titles = []
    companies = []
    summaries = []
    for job_elem in job_elems:
        title_elem = job_elem.find('h2', class_='title')
        company_elem = job_elem.find('div', class_='sjcl')
        summary_elem = job_elem.find('div', class_='summary')
        titles.append(title_elem.text.strip())
        companies.append(company_elem.text.strip())
        summaries.append(summary_elem.text.strip())

    response = {
        "titles": titles,
        "companies": companies,
        "summaries": summaries
    }

    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': json.dumps(response)
    }
Example no. 21
import json
import os

from bs4 import BeautifulSoup
from scraper_api import ScraperAPIClient


def handler(event, context):
    class job:
        def __init__(self, title, company, summary):
            self.title = title
            self.company = company
            self.summary = summary

    search_string = event['queryStringParameters']['search']
    location = event['queryStringParameters']['state']

    client = ScraperAPIClient(os.environ.get('API_KEY'))
    URL = f"https://www.indeed.com/jobs?q={search_string}&l={location}"
    page = client.get(url=URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(id='resultsCol')
    job_elems = results.find_all('div', class_='jobsearch-SerpJobCard')

    jobs = {"jobs": []}
    for job_elem in job_elems:
        title_elem = job_elem.find('h2', class_='title')
        company_elem = job_elem.find('div', class_='sjcl')
        summary_elem = job_elem.find('div', class_='summary')
        jobs['jobs'].append(
            job(title_elem.text.strip(), company_elem.text.strip(),
                summary_elem.text.strip()))

    result = json.dumps(jobs, default=lambda x: x.__dict__)

    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': result.replace("\\n", " ")
    }
Example no. 22
    def start_requests(self):
        secret = self.properties.properties['Scraper_secret']
        print("Properties:", self.properties.properties)
        scrape_url = "Scraper_Coop_scrape_url"
        client = ScraperAPIClient(secret)

        start_urls = list()
        priorities = list()
        categories = list()
        for i in self.properties.properties.keys():
            if str(i).startswith(scrape_url):
                property_value = self.properties.properties[i].split(";")
                start_urls.append(property_value[0])
                priorities.append(i.split("_")[4])
                categories.append(property_value[1])
        # print("Key start urls are:", start_urls, "Priorities:", priorities)

        for priority, url, category in zip(priorities, start_urls, categories):
            print("Calling yield function for url: ", url)
            yield scrapy.Request(client.scrapyGet(url=url),
                                 self.parse,
                                 dont_filter=True,
                                 priority=int(priority),
                                 meta={'category': category})
Example no. 23
import json
import os

from bs4 import BeautifulSoup
from scraper_api import ScraperAPIClient


def handler(event, context):
    client = ScraperAPIClient(os.environ.get('API_KEY'))
    monster_url = "https://www.monster.com/jobs/search?q=Software+Engineer&where=Nashville%2C+TN&page=6"
    page = client.get(url=monster_url, render=True)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find(class_='results-page')
    job_elems = results.find_all('div', class_='results-card')
    titles = []
    summaries = []
    for job_elem in job_elems:
        title_elem = job_elem.find('div', class_='title-company-location')
        summary_elem = job_elem.find('div', class_='results-card-description')
        titles.append(title_elem.text.strip())
        summaries.append(summary_elem.text.strip())
    response = {"titles": titles, "summaries": summaries}
    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': json.dumps(response)
    }
Example no. 24
class ScraperApi(Base):
    def __init__(self, key: str, max_retry):
        self.max_retry = max_retry
        self.client = ScraperAPIClient(key)

    def get(self, url: str, headers: Dict[str, str]):
        if not headers:
            headers = {}

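        # Fall back to a random entry from USER_AGENTS when the caller did not set one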
        if "User-Agent" not in headers:
            headers["User-Agent"] = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
        return self.client.get(url, headers, retry=self.max_retry).text

    def get_json(self, url: str, headers: Union[Dict[str, str], None]):
        return json.loads(self.get(url, headers))
Example no. 25
class GoogleNews:
    def __init__(self, key: str, lang="en", period="", ua=""):
        assert key != ""
        self.client = ScraperAPIClient(key)
        self.user_agent = ua
        self.__texts = []
        self.__titles = []
        self.__links = []
        self.__results = []
        self.__lang = lang
        self.__period = period
        self.__exec_time = 0

    def set_lang(self, lang):
        self.__lang = lang

    def search(self, q: Union[List[str], str], p: Union[List[int], int],
               start: datetime, end: datetime) -> List[dict]:
        """
        Searches Google News for the given term(s) and accumulates the parsed
        results in __results.

        Parameters:
        q = search term or list of terms
        p = page number or list of page numbers to retrieve
        start, end = optional date range applied to the search
        """
        start_time = time()
        if isinstance(q, str):
            q = [q]
        if isinstance(p, int):
            p = [p]
        elif len(p) < 1:
            p = [1]

        for query in q:
            for page in p:
                out = self.scrape_page(query, page, start, end)
                for o in out:
                    if o["title"] not in self.__titles:
                        self.__results.append(o)
                        self.__links.append(o["link"])
                        self.__texts.append(o["title"] + " " + o["desc"])

        self.__exec_time = time() - start_time
        return self.__results

    def scrape_page(self,
                    q: str,
                    page: int,
                    start: datetime,
                    end: datetime,
                    attempts=0):
        """
        page = number of the page to be retrieved
        """
        payload = {
            'q': q,
            'lr': f'lang_{self.__lang}',
            'tbs': f"lr:lang_1{self.__lang}",
            'tbm': 'nws',
            'start': (10 * (page - 1)),
        }
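        # tbm=nws restricts the query to the Google News vertical, start offsets the
        # results by 10 per page, and tbs carries the language / date-range filters.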

        out: List[dict] = []

        if start is not None and end is not None:
            payload['tbs'] += f",cdr:1,cd_min:{start.strftime(DATE_FMT)}," \
                              f"cd_max:{end.strftime(DATE_FMT)}"

        try:
            html = self.client.get(url=GOOG_URL + "?" +
                                   parse.urlencode(payload)).text
            content = Soup(html, "html.parser")
        except Exception as e:
            attempts += 1
            if attempts > 5:
                print(f"ERROR TRYING TO LOAD CONTENT: {e}")
                raise e
            sleep(0.1 * attempts)
            return self.scrape_page(q, page, start, end, attempts)
        try:
            result = content.find_all("div", id="search")[0].find_all("g-card")
        except IndexError:
            # no results were found
            return out

        for item in result:
            try:
                out.append({
                    "title":
                    item.find("div", {
                        "role": "heading"
                    }).text.replace("\n", ""),
                    "link":
                    item.find("a").get("href"),
                    "media":
                    item.findAll("g-img")[1].parent.text,
                    "date":
                    item.find("div", {
                        "role": "heading"
                    }).next_sibling.findNext('div').findNext('div').text,
                    "desc":
                    item.find("div", {
                        "role": "heading"
                    }).next_sibling.findNext('div').text.replace("\n", ""),
                    "image":
                    item.findAll("g-img")[0].find("img").get("src")
                })
            except Exception:
                pass
        return out

    def get_results(self) -> List[dict]:
        """Returns the __results."""
        return self.__results

    def get_text(self) -> List[str]:
        """Returns only the __texts of the __results."""
        return self.__texts

    def get_links(self) -> List[str]:
        """Returns only the __links of the __results."""
        return self.__links

    def clear(self):
        self.__texts = []
        self.__links = []
        self.__results = []
        self.__titles = []
        self.__exec_time = 0
Example no. 26
import scrapy
from http.cookies import SimpleCookie
from ..items import WgUpdateItem
from scrapy.loader import ItemLoader
import re
from scraper_api import ScraperAPIClient
from scraperapikey import key

client = ScraperAPIClient(key)


class WgSpider(scrapy.Spider):
    name = 'wg'
    cookies1 = {}
    ot, date_from, date_to, fur, city_id, start_date_value, end_date_value, api = '', '', '', '', '', '', '', ''

    def start_requests(self):

        rawdata = f" last_city={self.city_id}; last_cat=0; last_type=0"
        cookie = SimpleCookie()
        cookie.load(rawdata)
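        # SimpleCookie parses the raw "k=v; k2=v2" string into individual morsels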
        for name, morsel in cookie.items():
            self.cookies1[name] = morsel.value

        url = f"https://www.wg-gesucht.de/en/wg-zimmer-in-cityname.{self.city_id}.0.1.0.html?offer_filter=1&city_id={self.city_id}&noDeact=1&dFr={self.date_from}&dTo={self.date_to}&categories%5B%5D=0&rent_types%5B%5D=0&ot%5B%5D={self.ot}"
        if (self.api == '1'):
            yield scrapy.Request(client.scrapyGet(url=url),
                                 cookies=self.cookies1,
                                 callback=self.parse)
        else:
            yield scrapy.Request(url=url,
                                 cookies=self.cookies1,
                                 callback=self.parse)
Example no. 27
import os
from csv import DictReader

from scrapy import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

# from scrapy.utils.response import open_in_browser
from scraper_api import ScraperAPIClient

from ..items import CompanyItem
from dotenv import load_dotenv

load_dotenv()

client = ScraperAPIClient(os.environ["SCRAPER_API"])


class BbbSpiderSpider(CrawlSpider):
    name = "bbb_spider"

    def start_requests(self):
        with open("zip_cat_list3.csv") as file:
            dreader = DictReader(file)
            zip_cat = list(dreader)

        for req in zip_cat:
            print("-" * 40)
            print(req["zip_code"])

            # count URLs in the scheduler
Example no. 28
import proxies
import requests
from proxies import random_proxy
from scraper_api import ScraperAPIClient
import random
def random_line(fname):
    lines = open(fname).read().splitlines()
    return random.choice(lines)

client = ScraperAPIClient('d0224166b175ddca1f18dd5b5cca66a5')
result = client.get(url='http://httpbin.org/headers',
                    headers={'User-Agent': random_line('user-agents.txt')})
print(result.text)


Example no. 29
class snkrs:
    def __init__(self) -> None:
        self.products = {}

    def getInfoInnvictus(self, apiKey='aba275ef5f8f713e086a1a0ab240dd5c'):
        """ Method to retrieve a dict given launching shoes on innvictus.com/lanzamientos """

        self.client = ScraperAPIClient(apiKey)

        result = self.client.get(url='https://www.innvictus.com/lanzamientos')
        assert result.status_code == 200, f"Status code {result.status_code}"

        soup = BeautifulSoup(result.content, 'html.parser')
        links = soup.find('body').find_all("script",
                                           attrs={'type': 'text/javascript'})
        s = links[3]  # links[3] element gives var products
        productVar = re.search(r'\'(.*?)\'', str(s))
        productDict = json.loads(
            productVar.group(0).replace("'{\"id",
                                        "[{\"id").replace("\"}'", "\"}]"))
        #print(productVar.group(0).replace("'{\"id","[{\"id").replace("\"}'","\"}]"))

        for tenis in productDict:
            tenis['url'] = "https://www.innvictus.com/p/{id}".format(
                id=tenis['id'])
        self.products = productDict

    def selectSaveTargetShoes(self, save=False):
        for i, shoes in enumerate(self.products):
            print(f"Selection {i}")
            print("Shoe: ", shoes['name2'])
            print("Launching date: ", shoes['realdate'])
            print("Price: ", shoes['price'], "\n")
        selectedShoe = [
            int(x) for x in input("Select shoe number to track\n").split()
        ]

        #print(selectedShoe)
        def takeFromDict(dct, listElements):
            return [dct[element] for element in listElements]

        self.products = takeFromDict(self.products, selectedShoe)
        if save:
            with open('TargetShoes.txt', 'w') as f:
                for shoe in self.products:
                    f.write(shoe['url'])
                    f.write("\n")

    def isAvailable(self, fileName=None):
        def sendDiscordMessage(self, message='Hello world'):
            from discord import Webhook, RequestsWebhookAdapter
            webhook = Webhook.from_url(
                "https://discord.com/api/webhooks/811294684386426890/3SY7GtmBAwyjDM6qr73eDRqrzjRJZ2u0vlShoyOvvf_cCUNvv6YJqiGPI1udVWWqipVp",
                adapter=RequestsWebhookAdapter())
            webhook.send(message)

        if fileName is None:
            for url in self.products:
                notAvailable = self.client.get(url['url'])
                assert notAvailable.status_code == 200, f"Status code {notAvailable.status_code}"
                soupNotAvailable = BeautifulSoup(notAvailable.content,
                                                 'html.parser')
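                # Unreleased products render a "pdp-notFound" block and a "No encontrado"
                # page title, so finding either means the shoe is not yet available.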
                try:
                    notFoundClass = soupNotAvailable.find('body').find(
                        "div", attrs={
                            'class': 'pdp-notFound'
                        }).text.strip()
                    notFoundTitle = soupNotAvailable.find('title').text.strip()
                except Exception:
                    notFoundClass = ''
                    notFoundTitle = ''
                if "No encontrado" in notFoundTitle or "Este producto no" in notFoundClass:
                    url['Available'] = False
                    message = url['name2'], " no disponible aun"
                    print(message)
                    return False

                else:
                    message = url['name2'], " disponible", url['url']
                    print(message)
                    url['Available'] = True
                    sendDiscordMessage(self, message=message)
                    return True
        else:
            with open(fileName, 'r') as targetShoes:
                urls = [url.strip() for url in targetShoes]
                for url in urls:
                    notAvailable = self.client.get(url, headers=self.headers)
                    soupNotAvailable = BeautifulSoup(notAvailable.content,
                                                     'html.parser')
                    try:
                        notFoundClass = soupNotAvailable.find('body').find(
                            "div", attrs={
                                'class': 'pdp-notFound'
                            }).text.strip()
                        notFoundTitle = soupNotAvailable.find(
                            'title').text.strip()
                    except Exception:
                        notFoundClass = ''
                        notFoundTitle = ''
                    if "No encontrado" in notFoundTitle or "Este producto no" in notFoundClass:
                        print(url, " no disponible aun\n")
                    else:
                        print(url, " disponible \n")
Example no. 30
class TruliaSpider(scrapy.Spider):
    client = ScraperAPIClient('22c786c81d0f6a84eb1312a0d6c6aec5')
    name = 'trulia'
    allowed_domains = ['trulia.com']
    custom_settings = {'FEED_URI': '/tmp/data/data_for_rent_%(time)s.jl',
                       'FEED_FORMAT': 'jsonlines'}

    def __init__(self, state='CA', cities=['Oakland','Alameda','Berkeley','Emeryville'], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.state = state
        self.cities = cities
        url = 'https://trulia.com/for_rent/{city},{state}/2000-4000_price/1300p_sqft/'
        self.base_urls = [url.format(state=state, city=city) for city in self.cities]
        self.start_urls = [self.client.scrapyGet(url=url) for url in self.base_urls]
        self.le = LinkExtractor(restrict_xpaths='//*[@id="resultsColumn"]/div[1]')

    def parse(self, response):
        N = self.get_number_of_pages_to_scrape(response)
        self.logger.info("Determined that property pages are contained on {N} different index pages, each containing at most 30 properties. Proceeding to scrape each index page...".format(N=N))
        self.logger.info("Response URL is {}".format(response.url))
        for base_url in self.base_urls:
            for url in [base_url + "{n}_p/".format(n=n) for n in range(1, N+1)]:
                yield scrapy.Request(url=self.client.scrapyGet(url), callback=self.parse_index_page, dont_filter=True)

    @staticmethod
    def get_number_of_pages_to_scrape(response):
        pagination = response.xpath('//*[@data-testid="pagination-caption"]/text()')
        if pagination.re(r'1-30 of ([\d,]+) Results'):
            number_of_results = int(pagination.re(r'1-30 of ([\d,]+) Results')[0])
            return math.ceil(number_of_results/30)
        else:
            return 1

    def parse_index_page(self, response):
        links = self.le.extract_links(response)
        for link in links:
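            # Links extracted from the proxied page point at api.scraperapi.com; restore
            # the real domain before wrapping the URL with scrapyGet() again.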
            link.url = link.url.replace('https://api.scraperapi.com','https://trulia.com')
            yield scrapy.Request(url=self.client.scrapyGet(link.url), callback=self.parse_property_page, dont_filter=True)

    def parse_property_page(self, response):
        l = TruliaItemLoader(item=TruliaItem(), response=response)
        self.load_common_fields(item_loader=l, response=response)

        item = l.load_item()
        self.post_process(item=item)
        return item

    @staticmethod
    def load_common_fields(item_loader, response):
        '''Load field values which are common to "on sale" and "recently sold" properties.'''
        item_loader.add_value('url', urllib.parse.unquote(response.url[32:-60]))
        item_loader.add_xpath('address', '//*[@data-testid="home-details-summary-headline"]/text()')
        item_loader.add_xpath('city_state', '//*[@data-testid="home-details-summary-city-state"]/text()')
        item_loader.add_xpath('price', '//*[@data-testid="on-market-price-details"]/div/text()', re=r'\$([\d,]+)')
        item_loader.add_xpath('neighborhood', '//*[@data-testid="neighborhood-link"]/text()')
        item_loader.add_xpath('neighborhood_url', '//*[@data-testid="neighborhood-link"]/@href')

        fact_list = item_loader.nested_xpath('//*[@data-testid="facts-list"]')
        fact_list.add_xpath('bedrooms', xpath='.//*[@data-testid="bed"]/div/div[2]/text()', re=r'(\d+) (?:Beds|Bed|beds|bed)$')
        fact_list.add_xpath('bathrooms', xpath='.//*[@data-testid="bath"]/div/div[2]/text()', re=r'(\d+\.?[0-9]*) (?:Baths|Bath|baths|bath)$')
        fact_list.add_xpath('area', xpath='.//*[@data-testid="floor"]/div/div[2]/text()', re=r'([\d, -]+)')

        item_loader.add_xpath('telephone', '//*[@data-testid="home-description-text-description-phone-number"]/div/div[2]/text()')
        item_loader.add_xpath('description', '//*[@data-testid="home-description-text-description-text"]/text()')

        item_loader.add_xpath('tags','//*[@data-testid="hero-image-property-tag-1"]/span/text()')
        item_loader.add_xpath('tags','//*[@data-testid="hero-image-property-tag-2"]/span/text()')
        item_loader.add_xpath('tags','//*[@data-testid="hero-image-property-tag-3"]/span/text()')
        item_loader.add_xpath('tags','//*[@data-testid="hero-image-property-tag-4"]/span/text()')
        item_loader.add_xpath('tags','//*[@data-testid="hero-image-property-tag-5"]/span/text()')

        features = item_loader.nested_xpath('//*[@data-testid="structured-amenities-table-category"]')
        #features without a sub-header are 1 div below /table/tbody/tr/td
        features.add_xpath('features', xpath='.//table/tbody/tr/td/div/li/text()')
        #features with a sub-header are 2 divs below /table/tbody/tr/td
        #should we try to get the sub-header and associate it to the value? it would be in .//table/tbody/tr/td/div/div/text()
        features.add_xpath('features', xpath='.//table/tbody/tr/td/div[2]/li/text()')

        item_loader.add_xpath('attribute_values', '//*[@data-testid="wls-attribute"]/div/div/div/div/div/div[2]/div/text()', re=r'\d+')
        item_loader.add_xpath('attribute_names', '//*[@data-testid="wls-attribute"]/div/div/div[2]/div/div[2]/div/text()')



    @staticmethod
    def post_process(item):
        '''Add any additional data to an item after loading it'''
        s8 = ['section 8', 'section8', 'GoSection8.com']
        np = ['no pet', 'no pets', 'pets not allowed', 'no pets allowed','pets are not allowed']
        p = ['dog', 'cat', 'cats','small dogs', 'pet considered','pets considered','pets allowed','pets ok','pets okay','pets negotiable']

        item['features'] = list(dict.fromkeys(item.get('features',[]))) #remove list duplicates
        features = [str.casefold(f) for f in item.get('features',[])]
        description = [str.casefold(d) for d in item.get('description',[])]
        f_d = features + description
        tags = item.get('tags',[])

        section8 = [text for s8_phrase in s8 for text in f_d if s8_phrase in text]
        item['section8'] = bool(section8)

        if 'INCOME RESTRICTED' in tags or 'SENIOR HOUSING' in tags:
            item['section8'] = True

        no_pets = [text for np_phrase in np for text in f_d if np_phrase in text]
        yes_pets = [text for p_phrase in p for text in f_d if p_phrase in text]

        if no_pets:
            item['pets'] = False
        elif yes_pets:
            item['pets'] = True
        else:
            item['pets'] = None

        if 'PET FRIENDLY' in tags:
            item['pets'] = True

        if 'FURNISHED' in tags:
            item['furnished'] = True

        deposit = ' '.join([i for i in f_d if 'deposit' in i])
        deposit = [i for i in deposit.split() if i.replace('$','').replace(',','').isdigit()]
        if deposit:
            item['deposit'] = int(deposit[0].replace('$','').replace(',',''))

        year_built = ' '.join([i for i in f_d if 'year built' in i])
        year_built = [i for i in year_built.split() if i.isdigit()]
        if year_built:
            item['year_built'] = int(year_built[0])

        property_type = ' '.join([i for i in f_d if 'property type' in i])
        if property_type:
            item['property_type'] = property_type.replace('property type: ','')

        days_on_market = ' '.join([i for i in f_d if 'days on market' in i])
        days_on_market = [i for i in days_on_market.split() if i.isdigit()]
        if days_on_market:
            item['days_on_market'] = int(days_on_market[0])

        parking = ' '.join([i for i in f_d if 'parking' in i])
        if parking:
            item['parking'] = parking.replace('parking: ','')

        floors = ' '.join([i for i in f_d if 'floors' in i])
        if floors:
            item['floors'] = floors.replace('floors: ','')

        heating = ' '.join([i for i in f_d if 'heating' in i])
        if heating:
            item['heating'] = heating.replace('heating: ','')

        ac = [i for i in f_d if 'air conditioning' in i]
        if ac:
            item['ac'] = True

        fitness = [i for i in f_d if 'fitness center' in i]
        if fitness:
            item['fitness'] = True

        if 'attribute_names' in item and 'attribute_values' in item:
            item['attributes'] = {}
            for idx, val in enumerate(item['attribute_names']):
                item['attributes'][val] = item['attribute_values'][idx]

            del item['attribute_names']
            del item['attribute_values']