コード例 #1
0
 def parse(self, response):
     """Crawl the theater list page and yield one request per target cinema."""
     # partner cinema is not included
     anchors = response.xpath(
         '//footer/p[position()>=2 and position() <=3]//a')
     for anchor in anchors:
         url = anchor.xpath('./@href').extract_first()
         raw_name = anchor.xpath('./text()').extract_first()
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         loader.add_cinema_site(url, name)
         loader.add_value('source', self.name)
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         # english name is the last path segment with any query string removed
         name_en = url.split('/')[-1].split('?')[0]
         req = scrapy.Request(url, callback=self.parse_main_page)
         req.meta["data_proto"] = loader.load_item()
         req.meta["cinema_name_en"] = name_en
         req.meta["dont_merge_cookies"] = True
         yield req
コード例 #2
0
 def parse(self, response):
     """Crawl the theater list page and yield one request per target cinema."""
     for anchor in response.xpath('//div[@class="theater_info"]//li/a'):
         url = anchor.xpath('./@href').extract_first()
         raw_name = anchor.xpath('./text()').extract_first()
         if raw_name:
             url = response.urljoin(url)
         else:
             # partner theater element is different
             raw_name = ''.join(anchor.xpath(
                 './/text()').extract())
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         loader.add_cinema_site(url, name)
         loader.add_value('source', self.name)
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         req = scrapy.Request(url, callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #3
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule page."""
     for entry in response.xpath(
             '//section[@class="rcol searchTheater"]//li'):
         # area heading rows carry no cinema link
         if entry.xpath('./@class').extract_first() == "area":
             continue
         url = entry.xpath('./a/@href').extract_first()
         icon = entry.xpath('./img/@src').extract_first()
         raw_name = entry.xpath('./a/img/@alt').extract_first()
         # the brand prefix is encoded in the icon image, not in the alt text
         if icon is not None:
             if "icon_uc_ss.gif" in icon:
                 raw_name = "ユナイテッド・シネマ" + raw_name
             elif "icon_cpx_ss.gif" in icon:
                 raw_name = "シネプレックス" + raw_name
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         loader.add_cinema_site(response.urljoin(url), name)
         loader.add_value('source', self.name)
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         name_en = url.split('/')[-2]
         req = scrapy.Request(
             self.generate_cinema_schedule_url(name_en, self.date),
             callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #4
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule page."""
     # forum site have multiple cinema on one site, so we need to
     # specify cinema name on schedule page
     for block in response.xpath('//div[@class="theater-list__inner"]'):
         raw_name = block.xpath('./h4/text()').extract_first()
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         url = block.xpath('./p/a/@href').extract_first()
         loader.add_cinema_site(response.urljoin(url), name)
         loader.add_value('source', self.name)
         req = scrapy.Request(
             self.generate_cinema_schedule_url(url, self.date),
             callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #5
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing row and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when the showing is sold out / not sold, or a
        follow-up Request for the order page otherwise.
        """
        def parse_time(time_str):
            # "HH:MM" -> (hour, minute) as ints
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))

        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        screen_name = curr_showing.xpath('./th/div/text()').extract_first()
        showing_data_proto.add_screen_name(screen_name)
        start_time = curr_showing.xpath(
            './td[@class="time"]/div/text()').extract_first()
        start_hour, start_minute = parse_time(start_time)
        showing_data_proto.add_value(
            'start_time', self.get_time_from_text(start_hour, start_minute))
        # [1:] drops the first character of the end-time text
        # (presumably a separator such as "~" — confirm against the page)
        end_time = curr_showing.xpath(
            './td[@class="time"]/div/span/text()').extract_first()[1:]
        end_hour, end_minute = parse_time(end_time)
        showing_data_proto.add_value(
            'end_time', self.get_time_from_text(end_hour, end_minute))
        showing_data_proto.add_value('seat_type', 'NormalSeat')

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        # the vacancy icon's alt text encodes the raw booking status
        book_status = curr_showing.xpath(
            './/img[contains(@src,"icon_seat_vacant")]/@alt').extract_first()
        booking_data_proto.add_book_status(book_status, util=KoronaUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (total_seat_count
                               if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = curr_showing.xpath(
                './td[@class="btnReservation"]/div/a/@href').extract_first()
            request = scrapy.Request(url, callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
コード例 #6
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing block and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when sold out / not sold, or a follow-up Request
        for the order page otherwise.
        """
        def parse_time(time_str):
            # "HH:MM" -> (hour, minute) as ints
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))

        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        screen_name = curr_showing.xpath('./p/text()').extract_first()
        showing_data_proto.add_screen_name(screen_name)
        start_time = curr_showing.xpath(
            './/span[@class="strong fontXL"]/text()').extract_first()
        start_hour, start_minute = parse_time(start_time)
        showing_data_proto.add_value('start_time', self.get_time_from_text(
            start_hour, start_minute))
        # end time is sibling text of the start-time span; [1:] drops the
        # first character (presumably a separator — confirm against the page)
        end_time = curr_showing.xpath(
            './/span[@class="strong fontXL"]/../text()').extract_first()[1:]
        end_hour, end_minute = parse_time(end_time)
        showing_data_proto.add_value('end_time', self.get_time_from_text(
            end_hour, end_minute))
        showing_data_proto.add_value('seat_type', 'NormalSeat')

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        # the status icon's image path encodes the raw booking status
        book_status = curr_showing.xpath('.//img/@src').extract_first()
        booking_data_proto.add_book_status(book_status, util=MovixUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (
                total_seat_count if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            # order page url is the first argument of the onclick handler
            showing_script = curr_showing.xpath('./@onclick').extract_first()
            url = re.findall(r'\(\'(.+?)\'\,', showing_script)[0]
            request = scrapy.Request(url, callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
コード例 #7
0
    def parse_showing(self, response, curr_showing,
                      showing_url_parameter, data_proto, result_list):
        """
        Parse one showing dict and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when sold out / not sold, or a follow-up Request
        for the order page otherwise.

        Note: showing_url_parameter is mutated — 'showing_cd' is set to
        the current showing's code before the url is generated.
        """
        def parse_time(time_str):
            """
            ex. "24:40"
            """
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))
        showing_url_parameter['showing_cd'] = curr_showing['code']
        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        # time like 24:40 can not be directly parsed,
        # so we need to shift time properly
        start_hour, start_minute = parse_time(curr_showing['showingStart'])
        showing_data_proto.add_value('start_time', self.get_time_from_text(
            start_hour, start_minute))
        end_hour, end_minute = parse_time(curr_showing['showingEnd'])
        showing_data_proto.add_value('end_time', self.get_time_from_text(
            end_hour, end_minute))
        showing_data_proto.add_value('seat_type', 'NormalSeat')

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return
        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        book_status = curr_showing['unsoldSeatInfo']['unsoldSeatStatus']
        booking_data_proto.add_book_status(book_status, util=TohoUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (
                total_seat_count if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = self.generate_showing_url(**showing_url_parameter)
            request = scrapy.Request(url,
                                     callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
コード例 #8
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing dict and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when sold out / not sold, or a follow-up Request
        for the pre-ordering page otherwise.
        """
        def parse_time(time_str):
            # "HHMM" -> (hour, minute) as ints
            return (int(time_str[:2]), int(time_str[2:]))

        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        start_hour, start_minute = parse_time(curr_showing['start_time'])
        showing_data_proto.add_value(
            'start_time', self.get_time_from_text(start_hour, start_minute))
        end_hour, end_minute = parse_time(curr_showing['end_time'])
        showing_data_proto.add_value(
            'end_time', self.get_time_from_text(end_hour, end_minute))
        showing_data_proto.add_value('seat_type', 'NormalSeat')
        # TODO get seat type right now

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        booking_data_proto.add_book_status(curr_showing['available'],
                                           util=CinemaSunshineUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (total_seat_count
                               if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = curr_showing['url']
            request = scrapy.Request(url, callback=self.parse_pre_ordering)
            request.meta["data_proto"] = booking_data_proto.load_item()
            request.meta["dont_merge_cookies"] = True
            result_list.append(request)
コード例 #9
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing element and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when sold out / not sold, or a follow-up Request
        for the order page otherwise.
        """
        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        # [:-1] drops the trailing character of the time text
        # (presumably a suffix symbol — confirm against the page)
        start_time = curr_showing.xpath(
            './div/text()').extract_first()[:-1]
        start_hour, start_minute = self.parse_time(start_time)
        showing_data_proto.add_value('start_time', self.get_time_from_text(
            start_hour, start_minute))
        # end time not displayed in schedule page

        showing_data_proto.add_value('seat_type', 'NormalSeat')

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        # the div's css class encodes the raw booking status
        book_status = curr_showing.xpath('./div/@class').extract_first()
        booking_data_proto.add_book_status(book_status, util=KinezoUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (
                total_seat_count if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = curr_showing.xpath('./@href').extract_first()
            url = response.urljoin(url)
            request = scrapy.Request(url, callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
コード例 #10
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule page."""
     for anchor in response.xpath(
             '//div[@class="LNbowlingList LNshopList"]//a'):
         # link text is the prefecture; cinema name is prefecture + brand
         prefecture = anchor.xpath('./text()').extract_first()
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(prefecture + "コロナシネマワールド")
         name = loader.get_output_value('cinema_name')
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         url = anchor.xpath('./@href').extract_first()
         loader.add_cinema_site(url, name)
         loader.add_value('source', self.name)
         name_en = url.split('/')[-2]
         req = scrapy.Request(
             self.generate_cinema_schedule_url(name_en, self.date),
             callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #11
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule page."""
     # forum site have multiple cinema on one site, so we need to
     # specify cinema name on schedule page
     for link in response.xpath('//div[contains(@class,"area")]//dd//a'):
         # link text is the city; cinema name is brand + city
         city = link.xpath('./text()').extract_first()
         loader = ShowingLoader(response=response)
         loader.add_cinema_name("イオンシネマ" + city)
         name = loader.get_output_value('cinema_name')
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         url = response.urljoin(link.xpath('./@href').extract_first())
         loader.add_cinema_site(url, name)
         loader.add_value('source', self.name)
         req = scrapy.Request(url,
                              callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #12
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule page."""
     for anchor in response.xpath('//section[@id="theatres"]//a'):
         url = anchor.xpath('./@href').extract_first()
         raw_name = anchor.xpath('./text()').extract_first()
         # every cinema except ムービル carries the brand prefix
         if raw_name != "ムービル":
             raw_name = "109シネマズ" + raw_name
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         loader.add_cinema_site(response.urljoin(url), name)
         loader.add_value('source', self.name)
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         name_en = url.split('/')[-2]
         req = scrapy.Request(
             self.generate_cinema_schedule_url(name_en, self.date),
             callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #13
0
 def parse(self, response):
     """Crawl the theater list and request each cinema's schedule data."""
     for entry in response.xpath('//li[@class="clearfix"]'):
         raw_name = entry.xpath(
             './p[@class="theaterName"]/a/text()').extract_first()
         loader = ShowingLoader(response=response)
         loader.add_cinema_name(raw_name)
         name = loader.get_output_value('cinema_name')
         # skip cinemas that are not in the crawl target list
         if not self.is_cinema_crawl([name]):
             continue
         url = entry.xpath(
             './p[@class="theaterName"]/a/@href').extract_first()
         loader.add_cinema_site(response.urljoin(url), name)
         loader.add_value('source', self.name)
         name_en = url.split('/')[-1]
         req = scrapy.Request(
             self.generate_cinema_schedule_url(name_en, self.date),
             callback=self.parse_cinema)
         req.meta["data_proto"] = loader.load_item()
         yield req
コード例 #14
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing section and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item for sold-out / not-sold / free-seat showings, or a
        cookie-refresh Request leading to the order page otherwise.
        """
        def parse_time(time_str):
            # Time text may contain full-width digits/colon, so normalize
            # to ASCII before splitting "HH:MM".
            # BUGFIX: previously this normalized the closure variable
            # `start_time` instead of the argument, so the end time was
            # parsed from the start-time string.
            time_str = unicodedata.normalize('NFKC', time_str)
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))

        # showing section passed in may be unusable and need to be filtered
        time_section = curr_showing.xpath('./div[@class="time"]')
        if not time_section:
            return
        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        start_time = time_section.xpath('./span/span/text()').extract_first()
        start_hour, start_minute = parse_time(start_time)
        showing_data_proto.add_value(
            'start_time', self.get_time_from_text(start_hour, start_minute))
        end_time = time_section.xpath('./span/text()').extract_first()
        end_hour, end_minute = parse_time(end_time)
        showing_data_proto.add_value(
            'end_time', self.get_time_from_text(end_hour, end_minute))
        screen_name = curr_showing.xpath('./div[2]/a/text()').extract_first()
        showing_data_proto.add_screen_name(screen_name)
        # when site ordering is stopped stop crawling
        site_status = curr_showing.xpath('./a/span[2]/text()').extract_first()
        if site_status == '予約停止中':
            return
        # handle free order seat type showings
        seat_type = curr_showing.xpath(
            './div[@class="icon"]//img/@alt').extract_first()
        showing_data_proto.add_value('seat_type',
                                     AeonUtil.standardize_seat_type(seat_type))

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        book_status = curr_showing.xpath('./a/span/text()').extract_first()
        booking_data_proto.add_book_status(book_status, util=AeonUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        seat_type = showing_data_proto.get_output_value('seat_type')
        if (seat_type == 'FreeSeat' or book_status in ['SoldOut', 'NotSold']):
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (total_seat_count
                               if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, generate request to showing page
            showing_request = self.generate_agreement_request(
                response=response, curr_showing=curr_showing)
            # go to schedule page again to generate independent cookie
            # for each showing
            schedule_url = response.meta['schedule_url']
            request = scrapy.Request(schedule_url,
                                     dont_filter=True,
                                     callback=self.parse_new_cookie)
            request.meta["data_proto"] = booking_data_proto.load_item()
            request.meta["showing_request"] = showing_request
            (performance_id, _,
             _) = self.extract_showing_parameters(curr_showing)
            request.meta["cookiejar"] = performance_id
            result_list.append(request)
コード例 #15
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing element and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item when sold out / not sold, or a follow-up Request
        for the order page otherwise.
        """
        def parse_time(time_str):
            # "HH:MM" -> (hour, minute) as ints
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))

        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        start_time = curr_showing.xpath(
            './span[@class="start-time digit"]/text()').extract_first()
        start_hour, start_minute = parse_time(start_time)
        showing_data_proto.add_value(
            'start_time', self.get_time_from_text(start_hour, start_minute))
        end_time = curr_showing.xpath(
            './span[@class="end-time digit"]/text()').extract_first()
        end_hour, end_minute = parse_time(end_time)
        showing_data_proto.add_value(
            'end_time', self.get_time_from_text(end_hour, end_minute))
        # TODO cinema name extract failed
        # TODO extract name may be different from real name
        cinema_name = curr_showing.xpath(
            './span[@class="movie-info-theater"]/text()').extract_first()
        # if extract cinema name from showing info, use this one
        if cinema_name:
            showing_data_proto.replace_cinema_name(cinema_name)
        screen_name = "unknown"
        url = curr_showing.xpath(
            './span[@class="purchase-block"]/a/@href').extract_first()
        if url:
            # extract screen name by url parameter
            screen_number = re.findall(r'&sc=(\d+)&', url)
            if screen_number:
                screen_number = screen_number[-1]
                screen_name = "シアター" + screen_number
        # CANNOTSOLVE we cannot get screen name from site for
        # sold out and not sold showings so we have to give it a special
        # screen name
        showing_data_proto.add_screen_name(screen_name)
        showing_data_proto.add_value('seat_type', 'NormalSeat')

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        # the purchase link's css class encodes the raw booking status
        book_status = curr_showing.xpath(
            './span[@class="purchase-block"]/a/@class').extract_first()
        booking_data_proto.add_book_status(book_status, util=ForumUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (total_seat_count
                               if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = curr_showing.xpath(
                './span[@class="purchase-block"]/a/@href').extract_first()
            request = scrapy.Request(url, callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
コード例 #16
0
    def parse_showing(self, response, curr_showing, data_proto, result_list):
        """
        Parse one showing element and append the outcome to result_list
        (mutated in place): a showing item when booking crawl is off,
        a booking item for sold-out / not-sold / free-seat showings, or a
        follow-up Request for the order / 4DX confirm page otherwise.
        """
        def parse_time(time_str):
            # "HH:MM" -> (hour, minute) as ints
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))

        # start from a fresh loader carrying the cinema/movie data so far
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        start_time = curr_showing.xpath(
            './div/ol/li[@class="startTime"]/text()').extract_first()
        start_hour, start_minute = parse_time(start_time)
        showing_data_proto.add_value('start_time', self.get_time_from_text(
            start_hour, start_minute))
        # [1:] drops the first character of the end-time text
        # (presumably a separator such as "~" — confirm against the page)
        end_time = curr_showing.xpath(
            './div/ol/li[@class="endTime"]/text()').extract_first()[1:]
        end_hour, end_minute = parse_time(end_time)
        showing_data_proto.add_value('end_time', self.get_time_from_text(
            end_hour, end_minute))
        # handle free order seat type showings
        seat_type = curr_showing.xpath(
            './div/ul/li[@class="seatIcon"]/img/@src').extract_first()
        showing_data_proto.add_value(
            'seat_type', UnitedUtil.standardize_seat_type(seat_type))

        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return

        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        # the status icon's image path encodes the raw booking status
        book_status = curr_showing.xpath(
            './div/ul/li[@class="uolIcon"]//img[1]/@src').extract_first()
        booking_data_proto.add_book_status(book_status, util=UnitedUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        seat_type = showing_data_proto.get_output_value('seat_type')
        if (seat_type == 'FreeSeat' or book_status in ['SoldOut', 'NotSold']):
            # sold out or not sold: seat count is derivable without order page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (
                total_seat_count if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            # we will visit schedule page again to generate independent cookie
            # as same cookie will lead to confirm page
            url = curr_showing.xpath(
                './div/ul/li[@class="uolIcon"]/a/@href').extract_first()
            # determine if next page is 4dx confirm page by title
            title = showing_data_proto.get_output_value('title')
            if '4DX' in title:
                request = scrapy.Request(
                    url, callback=self.parse_4dx_confirm_page)
            else:
                request = scrapy.Request(
                    url, callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            # use independent cookie to avoid affecting each other
            request.meta["cookiejar"] = url
            result_list.append(request)