Example #1
0
    def _get_images(self, images):
        """Download, in parallel, every image that is not cached locally.

        Each element of *images* is a mapping with 'remote' (URL) and
        'local' (filesystem path) keys; only entries whose local file is
        missing are fetched.
        """
        def fetch_one(remote, local):
            util.save_data_to_file(util.request(remote), local)

        missing = [(entry['remote'], entry['local'])
                   for entry in images
                   if not os.path.exists(entry['local'])]
        util.run_parallel_in_threads(fetch_one, missing)
Example #2
0
    def _fill_categories_parallel(self, list, categories):
        """Fetch every (title, url) category page in parallel and append a
        directory item for each listable one.

        Categories whose listing is empty, or whose first entry carries
        the paid-content marker image, are skipped.  Appending to *list*
        is serialized through a shared lock.
        """
        def process_category(title, url):
            page = util.request(url)
            img_match = re.search(CATEGORY_IMG_RE, page, re.IGNORECASE)
            img = img_match and self._url(img_match.group(1))
            listing = util.substr(page, LISTING_START, LISTING_END)
            # Only the first listing entry matters: the original loop
            # broke after one iteration.
            first = next(re.finditer(LISTING_ITER_RE, listing,
                                     re.DOTALL | re.IGNORECASE), None)
            if first is None:
                # no links
                return
            if first.group('playimg').find(YELLOW_IMG) != -1:
                # paid content
                return
            item = self.dir_item()
            item['title'] = title
            item['url'] = url
            item['img'] = img
            with lock:
                self._filter(list, item)

        lock = Lock()
        util.run_parallel_in_threads(process_category, categories)
Example #3
0
 def fill_list_parallel(list, matches):
     """Build a directory item for every regex match in *matches*,
     fetching metadata in parallel threads.

     Uses ``self`` from the enclosing scope; appending to *list* is
     serialized through a shared lock.
     """
     def build_item(m):
         name = m.group('name')
         url = m.group('url')
         img, plot = self._get_meta(name, self._url(url))
         entry = self.dir_item()
         entry['title'] = name
         entry['url'] = url + 'video/'
         entry['img'] = img
         entry['plot'] = plot
         with lock:
             self._filter(list, entry)

     lock = Lock()
     util.run_parallel_in_threads(build_item, matches)
Example #4
0
        def fill_list_parallel(list, matches):
            """Create one directory item per match, with metadata fetched
            concurrently; list mutation is guarded by a shared lock."""
            def handle_match(m):
                meta_img, meta_plot = self._get_meta(
                    m.group('name'), self._url(m.group('url')))
                entry = self.dir_item()
                entry['title'] = m.group('name')
                entry['url'] = m.group('url') + 'video/'
                entry['img'] = meta_img
                entry['plot'] = meta_plot
                with lock:
                    self._filter(list, entry)

            lock = Lock()
            util.run_parallel_in_threads(handle_match, matches)
 def paralel_search(search):
     """Run every (provider, query) pair in *search* concurrently.

     Each result title is prefixed with the provider name; 'next' items
     are rewritten into directory entries.  Calls to ``p.list`` are
     serialized through a shared lock.
     """
     def do_search(p, what):
         # Start from an empty list so p.list() below always has a value
         # even when provider.search() raises before assigning `result`
         # (the original left `result` unbound in that case -> NameError;
         # it also created an unused `res` list).
         result = []
         try:
             result = p.provider.search(what)
             for item in result:
                 item['title'] = '[%s] %s' % (p.provider.name, item['title'])
                 if item['type'] == 'next':
                     item['type'] = 'dir'
                     item['title'] = '[%s] %s >>' % (p.provider.name, __language__(30063))
         except Exception:
             # Narrowed from a bare `except:` so KeyboardInterrupt and
             # SystemExit still propagate; provider errors are logged.
             traceback.print_exc()
         with lock:
             p.list(result)

     lock = Lock()
     util.run_parallel_in_threads(do_search, search)
 def paralel_search(search):
     """Run every (provider, query) pair in *search* concurrently.

     Each result title is prefixed with the provider name; 'next' items
     are rewritten into directory entries.  Calls to ``p.list`` are
     serialized through a shared lock.
     """
     def do_search(p, what):
         # Pre-assign so p.list() always has a value even when
         # provider.search() raises before `result` is bound (the
         # original would hit NameError; its `res = []` was unused).
         result = []
         try:
             result = p.provider.search(what)
             for item in result:
                 item['title'] = '[%s] %s' % (p.provider.name, item['title'])
                 if item['type'] == 'next':
                     item['type'] = 'dir'
                     item['title'] = '[%s] %s >>' % (p.provider.name, __language__(30063))
         except Exception:
             # Narrowed from a bare `except:`; KeyboardInterrupt and
             # SystemExit now propagate instead of being swallowed.
             traceback.print_exc()
         # `with lock:` replaces the manual try/acquire/finally/release —
         # identical semantics, guaranteed release on any exception.
         with lock:
             p.list(result)

     lock = Lock()
     util.run_parallel_in_threads(do_search, search)
Example #7
0
 def _request_parallel(self, requests):
     """Fetch all *requests* concurrently.

     Returns a list of [page, args] pairs, one per completed request, in
     whatever order the worker threads finished.
     """
     def fetch(req, *args):
         # Extra args are passed straight through so the caller can match
         # each response back to its originating request.
         return util.request(req), args

     pages = []
     q = util.run_parallel_in_threads(fetch, requests)
     while True:
         try:
             page, args = q.get_nowait()
         except Exception:
             # Queue drained (presumably queue.Empty — confirm against
             # util.run_parallel_in_threads).  Narrowed from a bare
             # `except:` so KeyboardInterrupt/SystemExit propagate.
             break
         pages.append([page, args])
     return pages
 def _request_parallel(self, requests):
     """Fetch all *requests* concurrently and return [page, args] pairs.

     Result order follows worker-thread completion, not request order;
     *args* lets callers correlate responses with requests.
     """
     def fetch(req, *args):
         return util.request(req), args

     results = util.run_parallel_in_threads(fetch, requests)
     pages = []
     while True:
         try:
             page, args = results.get_nowait()
         except Exception:
             # Drained (presumably queue.Empty); narrowed from a bare
             # `except:` so KeyboardInterrupt/SystemExit are not eaten.
             break
         pages.append([page, args])
     return pages
 def _fill_categories_parallel(self, list, categories):
     """Load every (title, url) category page concurrently and add a
     directory item for the listable ones.

     A category is skipped when its listing section has no entries, or
     when the first entry shows the paid-content marker image.  Shared
     list access is guarded by a lock.
     """
     def process_category(title, url):
         page = util.request(url)
         img_match = re.search(CATEGORY_IMG_RE, page, re.IGNORECASE)
         img = img_match and self._url(img_match.group(1))
         listing = util.substr(page, LISTING_START, LISTING_END)
         # Only the first iteration mattered in the original loop.
         first = next(re.finditer(LISTING_ITER_RE, listing,
                                  re.DOTALL | re.IGNORECASE), None)
         if first is None:
             # no links
             return
         if first.group('playimg').find(YELLOW_IMG) != -1:
             # paid content
             return
         item = self.dir_item()
         item['title'] = title
         item['url'] = url
         item['img'] = img
         with lock:
             self._filter(list, item)

     lock = Lock()
     util.run_parallel_in_threads(process_category, categories)
Example #10
0
 def _get_images(self, images):
     """Download, in parallel, each image whose local copy is missing.

     Entries in *images* carry 'remote' (URL) and 'local' (path) keys.
     """
     def grab(remote, local):
         util.save_data_to_file(util.request(remote), local)

     pending = []
     for img in images:
         if not os.path.exists(img['local']):
             pending.append((img['remote'], img['local']))
     util.run_parallel_in_threads(grab, pending)