Example #1
    async def start(self):
        first_pages = [
            self.search_link_format.format(category=category, page_number=1)
            for category in self.categories.keys()
        ]

        cat_max_pages = list()

        tasks = (self.extract_async(url) for url in first_pages)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page

            # Get max pages for this category
            max_page = 1
            soup = BeautifulSoup(page_content, 'html.parser')
            pagination = soup.find("div", {"class": "pagination"})
            if pagination is not None:

                for page_num in pagination.findAll("a"):
                    href = page_num.get("href")
                    if href is not None:
                        next_page_href = href.split('/')[-1]
                        search = re.search("strona-([0-9]+)", next_page_href,
                                           re.IGNORECASE)
                        if search is not None:
                            max_page = max(max_page, int(search.group(1)))

            category = url.split('/')[-3]
            # await self.crawl_pages(category, max_page)
            cat_max_pages.append((category, max_page))

        tasks = (self.crawl_pages(cat, max_page)
                 for cat, max_page in cat_max_pages)
        for page in AsyncCrawler.limited_as_completed(tasks):
            await page
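
Every example drives its coroutines through AsyncCrawler.limited_as_completed(tasks, limit), whose implementation is not shown. Below is a minimal sketch of one common way to write such a helper, assuming it takes an iterator of coroutines, keeps at most `limit` of them in flight, and yields awaitables that resolve to each result as it finishes; the project's actual static method may differ.

import asyncio
from itertools import islice


def limited_as_completed(coros, limit=10):
    # Schedule the first `limit` coroutines; `coros` must be an iterator,
    # e.g. the generator expressions used in the examples.
    futures = [asyncio.ensure_future(c) for c in islice(coros, 0, limit)]

    async def first_to_finish():
        while True:
            await asyncio.sleep(0)
            for f in futures:
                if f.done():
                    futures.remove(f)
                    try:
                        # Top up the in-flight set with the next coroutine.
                        futures.append(asyncio.ensure_future(next(coros)))
                    except StopIteration:
                        pass
                    return f.result()

    while futures:
        # Each yielded awaitable resolves to one completed task's result.
        yield first_to_finish()

It is used exactly as in the examples: for page in limited_as_completed(tasks, 5): url, page_content = await page.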
Example #2
    def __init__(self, max_concurrency=200):
        AsyncCrawler.__init__(self, max_concurrency)

        self.site_url = "https://aukcje.ideagetin.pl"
        self.search_link_format = "https://aukcje.ideagetin.pl/aukcje/{category}/widok-lista/strona-{page_number}"

        self.output_dir_path_format = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "Ideagetin",
            "{category}")

        log.debug("Output directory path format: %s" %
                  self.output_dir_path_format)

        # category name => catid
        self.categories = {
            "pojazdy-samochodowe-i-motocykle": 1,
            "maszyny-budowlane": 2,
            "przyczepy-naczepy": 3,
            "gastronomia-i-meble": 4,
            "maszyny-rolnicze-i-nbsp-lesne": 5,
            "medycyna-i-kosmetyka": 6,
            "wozki-widlowe": 7,
            "inne-maszyny-i-nbsp-urzadzenia": 8,
            "maszyny-produkcyjne": 9,
            "sport-i-rekreacja": 10
        }

        self.flags = {
            # 0. new record
            "new": 0,
            # 1. updated (new price, new date)
            "updated": 1,
            # 2. ended, sold (no renew)
            "sold": 2
        }

        self.fields = [("id", str), ("link", str), ("category_id", int),
                       ("category", str), ("title", str), ("start", str),
                       ("stop", str), ("type", str), ("images", str),
                       ("parameters", str), ("description", str),
                       ("price", float), ("flag", int)]

        self.field_names = [field_name for field_name, _ in self.fields]
Example #3
    def __init__(self, max_concurrency=200):
        AsyncCrawler.__init__(self, max_concurrency)

        self.site_url = "https://aukcje.efl.com.pl"
        self.search_link_format = \
            "https://aukcje.efl.com.pl/AuctionList?category={cat_id}&page={page_no}&sort=Title-asc"

        self.output_dir_path_format = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "Efl", "{category}")

        log.debug("Output directory path format: %s" %
                  self.output_dir_path_format)

        # category name => catid
        self.categories = {
            "Pojazdy/Osobowe": 1,
            "Pojazdy/Dostawcze": 1,
            "Pojazdy/Ciezarowe": 1,
            "Pojazdy/Naczepy-i-przyczepy": 1,
            "Pojazdy/Motocykle-i-quady": 1,
            "Pojazdy/Pozostale": 1,
            "Carefleet/Osobowe": 1,
            "Carefleet/Dostawcze": 1,
            "Carefleet/Pozostale": 1,
        }

        self.flags = {
            # 0. new record
            "new": 0,
            # 1. updated (new price, new date)
            "updated": 1,
            # 2. ended, sold (no renew)
            "sold": 2
        }

        self.fields = [("id", str), ("link", str), ("category_id", int),
                       ("category", str), ("title", str), ("start", str),
                       ("stop", str), ("type", str), ("images", str),
                       ("parameters", str), ("description", str),
                       ("price", float), ("flag", int)]

        self.field_names = [field_name for field_name, _ in self.fields]
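
The (name, type) pairs in self.fields configure a CsvManager that is not included in the snippet. As a rough illustration only (the real CsvManager is assumed to key rows by "id" and to use the declared types when reading values back), the field names can feed a standard csv.DictWriter:

import csv

# Hypothetical stand-in for the CsvManager configuration above.
fields = [("id", str), ("link", str), ("price", float), ("flag", int)]
field_names = [name for name, _ in fields]

with open("auctions.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=field_names)
    writer.writeheader()
    writer.writerow({"id": "123", "link": "https://example.com/a/123",
                     "price": 4500.0, "flag": 0})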
Example #4
                self.sent = self.sent[-2000:]
            for link_ in self.sent + links:
                f.write(link_ + '\n')
        self.put_new_tweets_for_the_bird(bird_food)
        print('Ignored {} links as previously sent.'.format(ignored))

    def put_new_tweets_for_the_bird(self, urls):
        with open('new.dat', 'w') as f:
            for link_ in urls:
                f.write(link_ + '\n')


while True:
    try:
        timeout = 900
        ds = Seeker()
        crawler = AsyncCrawler()
        start = time.time()
        freebies = crawler.crawl()
        stop = time.time() - start
        print('Found {} links in {}s.'.format(len(freebies), stop))
        ds.process_stuff(freebies)
        total_stop = time.time() - start
        print('Did full cycle in {}s. Going to sleep for {}s'.format(
            total_stop, timeout))
        time.sleep(timeout)
    except KeyboardInterrupt:
        raise
    except Exception:
        print('Failure:', sys.exc_info()[0])
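Example #5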
    def __init__(self, max_concurrency=200):
        AsyncCrawler.__init__(self, max_concurrency)

        self.site_url = "https://aukcje.pkoleasing.pl/en/"
        self.search_category_url_format = \
            "https://aukcje.pkoleasing.pl/en/auctions/list/pub/all/{category}/all?page={page_number}"

        self.output_dir_path_format = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "Pkoleasing",
            "{category}")

        log.debug("Output directory path format: %s" %
                  self.output_dir_path_format)

        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")

        self.driver = webdriver.Chrome(executable_path="/bin/chromedriver",
                                       options=chrome_options)

        # category name => catid
        self.categories = {
            "vehicles": 1,
            "ecr_machinery": 2,
            "ecr_agricultural": 3,
            "ecr_industrial": 4,
            "ecr_medic": 5,
            "ecr_trailers1": 6,
            "ecr_bus": 7,
            "ecr_motorcycles": 8,
            "ecr_other1": 9
        }

        self.flags = {
            # 0. new record
            "new": 0,
            # 1. updated (new price, new date)
            "updated": 1,
            # 2. ended, sold (no renew)
            "sold": 2
        }

        self.fields = [
            ("id", str),
            ("link", str),
            ("category_id", int),
            ("category", str),
            ("title", str),
            ("start", str),
            ("stop", str),
            ("type", str),
            ("images", str),
            ("parameters", str),
            ("description", str),
            ("price_pln", float),
            ("price_buy_now_pln", float),
            ("price_pln_brutto", float),
            ("price_euro", float),
            # 'mileage",
            ("flag", int)
        ]

        self.field_names = [field_name for field_name, _ in self.fields]
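
The headless Chrome instance created in this constructor keeps an OS process alive, and nothing in the snippet releases it. A small, assumed teardown method (not part of the original code) could look like this:

    def close(self):
        # Shut down the headless Chrome process started in __init__.
        if self.driver is not None:
            self.driver.quit()
            self.driver = None

Example #6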
    async def crawl_pages(self, category, max_pages):
        pages = (self.search_category_url_format.format(
            category=category, page_number=page_number)
                 for page_number in range(1, max_pages + 1))

        auctions_links = list()

        tasks = (self.extract_async(url) for url in pages)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                auctions_links.extend(
                    self.parse_search_result_page(page_content))

        if not auctions_links:
            log.warning("No results found for category: %s" % category)
            return

        log.debug("Found: %d auctions in %d pages of category: %s" %
                  (len(auctions_links), max_pages, category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()

        for auction_url in auctions_links:
            self.driver.get(auction_url)

            extracted_data = self.parse_data(category, auction_url,
                                             self.driver.page_source)
            if csv_manager.check_row_exist(extracted_data):
                log.debug("row already existed in csv")
                extracted_data["flag"] = self.flags.get("updated")
            else:
                log.debug("row in new")
                extracted_data["flag"] = self.flags.get("new")

            csv_manager.update_row(extracted_data)

            auction_output_dir = os.path.join(output_dir,
                                              extracted_data.get("id"))
            Util.create_directory(auction_output_dir)

            if extracted_data.get("images") is not None:
                images_urls = extracted_data.get("images").split('|')

                local_img = list()

                for img_url in images_urls:
                    local_img_file_path = os.path.join(
                        auction_output_dir, "{img_id}.png".format(
                            img_id=self.get_image_id(img_url)))

                    if not Util.check_file_exist(local_img_file_path):
                        local_img.append((img_url, local_img_file_path))

                download_tasks = (self.download_file(img_url, img_file_path)
                                  for img_url, img_file_path in local_img)

                for r in AsyncCrawler.limited_as_completed(download_tasks):
                    await r

        csv_manager.close_file()
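
Note that self.driver.get() and self.driver.page_source are blocking Selenium calls, so the whole event loop stalls while each auction page renders, including any image downloads running in other tasks. One way to keep the loop responsive, sketched here as an assumption rather than something the original code does, is to push the WebDriver work onto a worker thread:

import asyncio

async def fetch_with_selenium(driver, url):
    # Run the blocking WebDriver calls in the default thread pool so the
    # event loop can keep servicing other coroutines.
    loop = asyncio.get_event_loop()

    def _blocking_fetch():
        driver.get(url)
        return driver.page_source

    return await loop.run_in_executor(None, _blocking_fetch)

A single WebDriver instance is still not safe for concurrent use, so only one task should call this at a time.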
Example #7
    async def start(self):
        tasks = (self.crawl_pages(category) for category in self.categories)
        for res in AsyncCrawler.limited_as_completed(tasks):
            await res
Example #8
    def __init__(self, max_concurrency=200):
        AsyncCrawler.__init__(self, max_concurrency)

        self.site_url = "https://portalaukcyjny.mleasing.pl/"
        self.offer_url_format = "https://portalaukcyjny.mleasing.pl/#/offer/{offer_id}/details"
        # self.search_category_url_format = \
        #     "https://portalaukcyjny.mleasing.pl/api/offer-read/search" \
        #     "?selectedCategory={cat_id}" \
        #     "&offerType%5B%5D=3" \
        #     "&$orderBy=AuctionType%20asc,IsPromoted%20desc,Id%20desc" \
        #     "&$skip={skip}"\
        #     "&$top={max_num_of_results}"

        self.search_category_url_format = \
            "https://portalaukcyjny.mleasing.pl/api/offer-read/search" \
            "?selectedCategory={cat_id}" \
            "&offerType%5B%5D=3" \
            "&$filter=(1%20eq%201)%20and%20((IsBuyNow%20eq%20true)%20or" \
            "%20(AuctionType%20eq%20Mleasing.SHL.Contracts.Enums.AuctionType%272%27))" \
            "&$orderBy=AuctionType%20asc,IsPromoted%20desc,Id%20desc" \
            "&$skip={skip}" \
            "&$top={max_num_of_results}"

        self.get_images_api_url_format = \
            "https://portalaukcyjny.mleasing.pl/api/offer-read/get-images?id={auction_id}"

        self.get_image_api_url_format = \
            "https://portalaukcyjny.mleasing.pl/api/offer-read/get-image?id={img_id}"

        self.output_dir_path_format = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "Mleasing",
            "{category}")

        log.debug("Output directory path format: %s" %
                  self.output_dir_path_format)

        # category name => catid
        self.categories = {
            # "Wszystkie": 0,
            "Auto": 1,  # "Osobowe"
            "Vans": 2,  # "Dostawcze"
            "Lorry": 3,  # "Ciężarowe"
            "Devices": 4,  # "Urządzenia"
            "Medical": 5,  # "Medyczne"
            "Building": 6  # "Budowlane": 6,
            # "Other" # "Inne": 7
        }

        self.flags = {
            # 0. new record
            "new": 0,
            # 1. updated (new price, new date)
            "updated": 1,
            # 2. ended, sold (no renew)
            "sold": 2
        }

        self.types = {"auction": 1, "buynow": 2, "sell offer": 3}

        self.fields = [
            ("id", str),
            ("link", str),
            ("category_id", int),
            ("category", str),
            ("title", str),
            ("start", str),
            ("stop", str),
            ("type", int),
            ("images", str),
            ("parameters", str),
            ("description", str),
            ("price_pln", float),
            ("price_buy_now_pln", float),
            # 'mileage",
            ("flag", int)
        ]

        self.field_names = [field_name for field_name, _ in self.fields]
Example #9
    async def crawl_pages(self, category):
        cat_id = self.categories.get(category)
        offset = 0
        max_results = 50
        auctions = list()

        while True:
            url = self.search_category_url_format.format(
                cat_id=cat_id, skip=offset, max_num_of_results=max_results)
            _, page_content = await self.extract_async(url)
            if page_content is None:
                break

            json_obj = json.loads(page_content.decode("utf-8"))

            items = json_obj.get("Items") or []
            auctions.extend(items)

            offset += max_results

            if len(items) < max_results:
                break

        log.debug("Found: %d auctions of category: %s" %
                  (len(auctions), category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        log.info("Csv output directory path: %s, csv file: %s" %
                 (output_dir, csv_file_path))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()

        tasks = (self.parse_item(category, item) for item in auctions)
        for res in AsyncCrawler.limited_as_completed(tasks, 5):
            extracted_data = await res

            if csv_manager.check_row_exist(extracted_data):
                extracted_data["flag"] = self.flags.get("updated")
            else:
                extracted_data["flag"] = self.flags.get("new")

            csv_manager.update_row(extracted_data)

            auction_output_dir = os.path.join(output_dir,
                                              extracted_data.get("id"))
            Util.create_directory(auction_output_dir)

            if extracted_data.get("images") is not None:
                images_urls = extracted_data.get("images").split('|')

                local_img = list()

                for img_url in images_urls:
                    local_img_file_path = os.path.join(
                        auction_output_dir, "{img_id}.jpg".format(
                            img_id=self.get_image_id(img_url)))

                    if not Util.check_file_exist(local_img_file_path):
                        local_img.append((img_url, local_img_file_path))

                download_tasks = (self.download_file(img_url, img_file_path)
                                  for img_url, img_file_path in local_img)

                for r in AsyncCrawler.limited_as_completed(download_tasks):
                    await r

        csv_manager.close_file()
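
The $skip/$top loop at the start of this method is a generic offset-paging pattern. A stand-alone sketch of the same idea (a hypothetical helper, not part of the original class):

async def paged_items(fetch_page, page_size=50):
    # `fetch_page(skip, top)` is assumed to return one page of results as a list.
    offset = 0
    while True:
        items = await fetch_page(skip=offset, top=page_size)
        if not items:
            return
        for item in items:
            yield item
        # A short page means the server has run out of results.
        if len(items) < page_size:
            return
        offset += page_size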
Example #10
    async def crawl_pages(self, category, max_pages):
        pages = (self.search_link_format.format(category=category,
                                                page_number=page_number)
                 for page_number in range(1, max_pages + 1))

        auctions_links = list()

        tasks = (self.extract_async(url) for url in pages)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                auctions_links.extend(
                    self.parse_search_result_page(page_content))

        if not auctions_links:
            log.warning("No results found for category: %s" % category)
            return

        log.debug("Found: %d auctions in %d pages of category: %s" %
                  (len(auctions_links), max_pages, category))

        output_dir = self.output_dir_path_format.format(category=category)
        csv_file_path = os.path.join(
            output_dir, "{category}.csv".format(category=category))

        Util.create_directory(output_dir)

        csv_manager = CsvManager(csv_file_path, self.fields, "id")
        csv_manager.open_file()
        '''
        tasks = (self.extract_multi_async([url.replace("aukcja", "zdjecia"), url]) for url in auctions_links)
        for pages in AsyncCrawler.limited_as_completed(tasks):
            results = await pages
            images_url, images_page_content = results[0]
            url, page_content = results[1]
        '''
        tasks = (self.extract_async(url) for url in auctions_links)
        for page in AsyncCrawler.limited_as_completed(tasks, 5):
            url, page_content = await page
            if url is not None and page_content is not None:
                extracted_data = self.parse_data(category, url, page_content)

                images_links = list()
                images_url = url.replace("aukcja", "zdjecia")
                _, images_page_content = await self.extract_async(images_url)
                if images_url is not None and images_page_content is not None:
                    images_links = self.parse_full_images_page(
                        images_page_content)
                    extracted_data["images"] = '|'.join(images_links)

                if csv_manager.check_row_exist(extracted_data):
                    if _translate.get("finished") in extracted_data.get(
                            "stop").lower():
                        extracted_data["flag"] = self.flags.get("sold")
                    else:
                        extracted_data["flag"] = self.flags.get("updated")
                else:
                    extracted_data["flag"] = self.flags.get("new")

                csv_manager.update_row(extracted_data)

                auction_output_dir = os.path.join(output_dir,
                                                  extracted_data.get("id"))
                Util.create_directory(auction_output_dir)

                if extracted_data.get("images") is not None:
                    images_urls = extracted_data.get("images").split('|')

                    local_img = list()

                    for img_url in images_urls:
                        local_img_file_path = os.path.join(
                            auction_output_dir, "{img_id}.jpg".format(
                                img_id=self.get_image_id(img_url)))

                        if not Util.check_file_exist(local_img_file_path):
                            local_img.append((img_url, local_img_file_path))

                    download_tasks = (self.download_file(
                        img_url, img_file_path)
                                      for img_url, img_file_path in local_img)

                    for r in AsyncCrawler.limited_as_completed(download_tasks):
                        await r

            else:
                log.error("Url or page_content none: %s" % url)

        csv_manager.close_file()
Example #11
    parser.add_argument('--url-prefix',
                        default=DEFAULT_PREFIX,
                        help='''Prefix to accept.''')
    parser.add_argument(
        '--batch-size',
        default=10000,
        type=int,
        help='''Number of entries to hold in memory before writing file''')

    return parser.parse_args()


if __name__ == '__main__':
    ARGS = parse_args()
    load = ARGS.load_data
    root_url = ARGS.base_url
    prefix = ARGS.url_prefix
    batch_size = ARGS.batch_size

    loop = asyncio.get_event_loop()
    task = AsyncCrawler(root_url=root_url,
                        out_file='sitemap.xml',
                        batch_size=batch_size,
                        prefix=prefix,
                        load=load)
    try:
        # Register Ctrl-C handling before the crawl runs; add_signal_handler
        # can raise if it is not supported in this context.
        loop.add_signal_handler(signal.SIGINT, loop.stop)
    except RuntimeError:
        pass

    loop.run_until_complete(task.run())
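
On Python 3.7+ the same entry point can be expressed with asyncio.run(), which creates and closes the event loop itself. This is an alternative sketch, not a change to the code above; Ctrl-C then arrives as a plain KeyboardInterrupt instead of going through loop.add_signal_handler.

import asyncio

if __name__ == '__main__':
    ARGS = parse_args()
    crawler = AsyncCrawler(root_url=ARGS.base_url,
                           out_file='sitemap.xml',
                           batch_size=ARGS.batch_size,
                           prefix=ARGS.url_prefix,
                           load=ARGS.load_data)
    # asyncio.run() handles loop creation and teardown.
    asyncio.run(crawler.run())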