Example #1
    def __init__(self, config):
        self.config = config
        self.index_required_regexps = []
        self.index_filter_regexps = []
        self.detected_language = None
        self.seed_urls = None
        self.domain = None
        self.collections_path = None
        self.collection_name = None
        self.template_proportion = None
        self.min_templates = None
        self.max_templates = None
        self.applyConfigFile()
        self.domain = extractDomain(self.seed_urls[0])
        self.url_to_tree_mapping = {}
        self.url_to_headers_mapping = {}

        # Boilerplate remover class
        self.domain_nodes_dict = DomainNodesDict(self.domain,
                                                 self.min_templates,
                                                 self.max_templates,
                                                 self.template_proportion)
        if 'template_dict' in config:
            self.domain_nodes_dict.update(dict(config['template_dict']))
            vals = self.domain_nodes_dict.values()
            self.domain_nodes_dict.num_urls = max(vals) if vals else 0
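The constructor expects a config dict whose keys match the attributes it initialises. Below is a minimal usage sketch, assuming the constructor shown belongs to sky's Scraper class (import path assumed); the key values are illustrative, not documented defaults.

from sky.scraper import Scraper  # import path assumed, not taken from the snippet above

# Hypothetical config: keys inferred from the attributes set in __init__
config = {
    'seed_urls': ['http://example.com/'],          # required: domain is derived from the first entry
    'collections_path': '/tmp/sky_collections/',
    'collection_name': 'example.com',
    'template_proportion': 0.4,
    'min_templates': 2,
    'max_templates': 100,
    'index_required_regexps': [],
    'index_filter_regexps': [],
}
scraper = Scraper(config)  # applyConfigFile() copies each key onto the instance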
Example #2
    def __init__(self, config):
        self.config = config
        self.index_required_regexps = []
        self.index_filter_regexps = []
        self.detected_language = None
        self.seed_urls = None
        self.domain = None
        self.collections_path = None
        self.collection_name = None
        self.template_proportion = None
        self.min_templates = None
        self.max_templates = None
        self.applyConfigFile()
        self.domain = extractDomain(self.seed_urls[0])
        self.url_to_tree_mapping = {}
        self.url_to_headers_mapping = {}

        # Boilerplate remover class
        self.domain_nodes_dict = DomainNodesDict(
            self.domain, self.min_templates, self.max_templates, self.template_proportion)
        if 'template_dict' in config:
            self.domain_nodes_dict.update(dict(config['template_dict']))
            vals = self.domain_nodes_dict.values()
            self.domain_nodes_dict.num_urls = max(vals) if vals else 0
Example #3
File: view.py Project: bgarrels/sky
    def post(self):
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()  # copy so the shared default config is not mutated
        CRAWL_CONFIG.update({
            'collections_path': os.path.join(os.path.expanduser('~'), 'sky_collections/'),
            # 'max_workers': 10,
        })
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode('utf8')
            if value and arg != 'url' and arg != 'checkboxcache':
                print('pre', arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else [value]
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value
                print('post', arg, CRAWL_CONFIG[arg])

        url = self.get_argument('url', '')

        use_cache = self.get_argument('checkboxcache', '')

        domain = extractDomain(url)
        CRAWL_CONFIG['seed_urls'] = [url]
        CRAWL_CONFIG['collection_name'] = domain[7:]

        if use_cache != 'on':

            col_path = os.path.join(CRAWL_CONFIG['collections_path'],
                                    CRAWL_CONFIG['collection_name'])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({
            'template_proportion': 0.4,
            'max_templates': 100
        })

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True,
                                  maxn=CRAWL_CONFIG['max_saved_responses'])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG['max_saved_responses']:
                break
            dc = res[url]
            dc['url'] = url
            dc['source_name'] = domain
            dc['images'] = [x for x in reversed(dc['images'][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print('num unique images', len(get_image_set({x['url']: x for x in items})))

        if items and 'money' in items[0]:
            items = sorted(items, key=lambda x: len(x['money']), reverse=True)

        self.render('page_template.html', items=items, cached=False)
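The handler relies on an is_numeric helper to decide whether a form value should be coerced to int. A plausible stand-in is sketched below; it is an assumption, not sky's actual implementation.

def is_numeric(value):
    # Hypothetical stand-in for the helper used above: True when the string parses as an int.
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        return False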
Example #4
    def post(self):
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
        CRAWL_CONFIG.update(
            {
                "collections_path": os.path.join(os.path.expanduser("~"), "sky_view_collections/"),
                # 'max_workers': 10,
            }
        )
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode("utf8")
            if value and arg != "url" and arg != "checkboxcache":
                print("pre", arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else value.split(", ")
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value.split(", ")[0]
                print("post", arg, CRAWL_CONFIG[arg])

        url = self.get_argument("url", "")

        use_cache = self.get_argument("checkboxcache", "")

        domain = extractDomain(url)
        CRAWL_CONFIG["seed_urls"] = [url]
        CRAWL_CONFIG["collection_name"] = domain[7:]

        if use_cache != "on":

            col_path = os.path.join(CRAWL_CONFIG["collections_path"], CRAWL_CONFIG["collection_name"])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({"template_proportion": 0.4, "max_templates": 100})

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True, maxn=CRAWL_CONFIG["max_saved_responses"])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG["max_saved_responses"]:
                break
            dc = res[url]
            dc["url"] = url
            dc["source_name"] = domain
            dc["images"] = [x for x in reversed(dc["images"][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print("num unique images", len(get_image_set({x["url"]: x for x in items})))

        if items and "money" in items[0]:
            items = sorted(items, key=lambda x: len(x["money"]), reverse=True)

        self.render("page_template.html", items=items, cached=False)
Example #5
    def applyConfigFile(self):
        for config_key, config_value in self.config.items():
            setattr(self, config_key, config_value)
        self.domain = extractDomain(self.seed_urls[0])
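applyConfigFile simply copies every config key onto the instance via setattr, then re-derives the domain from the first seed URL. A tiny self-contained illustration of that copying step (not sky code):

class _Demo:
    # Minimal illustration of the setattr pattern used above.
    def __init__(self, config):
        self.config = config
        for key, value in config.items():
            setattr(self, key, value)

d = _Demo({'seed_urls': ['http://example.com/'], 'min_templates': 2})
print(d.seed_urls, d.min_templates)  # -> ['http://example.com/'] 2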
Example #6
    def post(self):
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
        CRAWL_CONFIG.update({
            'collections_path': os.path.join(os.path.expanduser('~'), 'sky_view_collections/'),
            # 'max_workers': 10,
        })
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode('utf8')
            if value and arg != 'url' and arg != 'checkboxcache':
                print('pre', arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else value.split(', ')
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value.split(', ')[0]
                print('post', arg, CRAWL_CONFIG[arg])

        url = self.get_argument('url', '')

        use_cache = self.get_argument('checkboxcache', '')

        domain = extractDomain(url)
        CRAWL_CONFIG['seed_urls'] = [url]
        if domain.startswith("http"):
            CRAWL_CONFIG['collection_name'] = domain.split("/")[2]
        else:
            CRAWL_CONFIG['collection_name'] = domain.split("/")[0]

        if use_cache != 'on':

            col_path = os.path.join(CRAWL_CONFIG['collections_path'],
                                    CRAWL_CONFIG['collection_name'])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({
            'template_proportion': 0.4,
            'max_templates': 100,
        })

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True,
                                  maxn=CRAWL_CONFIG['max_saved_responses'])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG['max_saved_responses']:
                break
            dc = res[url]
            dc['url'] = url
            dc['source_name'] = domain
            dc['images'] = [x for x in reversed(dc['images'][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print('num unique images', len(get_image_set({x['url']: x for x in items})))

        if items and 'money' in items[0]:
            items = sorted(items, key=lambda x: len(x['money']), reverse=True)

        self.render('page_template.html', items=items, cached=False)
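The post handlers above use self.request.arguments, self.get_argument, and self.render, which points at a tornado.web.RequestHandler. A hedged sketch of how such a handler might be mounted; the handler name and template path below are assumptions, not taken from view.py.

import tornado.ioloop
import tornado.web

# Hypothetical wiring; 'CrawlHandler' stands in for whichever class defines the post() shown above.
def make_app():
    return tornado.web.Application([
        (r'/crawl', CrawlHandler),
    ], template_path='templates')  # page_template.html would be rendered from here

if __name__ == '__main__':
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()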