Example #1
    def run(self, delete_existing_documents=False):
        # get default plugin
        print("getting crawl plugin info")
        self.crawl_config = self.get_default_plugin()

        # apply this specific plugin
        self.crawl_config.update(self.get_specific_plugin())

        # delete existing documents
        if delete_existing_documents:
            print("deleting documents before crawling")
            self.delete_existing_documents()
            seen_urls = []
        # add the already visited urls to the config
        else:
            print("getting seen urls")
            seen_urls = self.get_seen_urls()
        self.crawl_config['seen_urls'] = seen_urls

        print("getting template dict")
        # add the template for this crawl_plugin to the scraping config
        self.crawl_config['template_dict'] = self.get_template_dict()

        print("starting crawl")
        # the save-data callbacks and the NewsCrawler are passed to crawl.start separately
        if 'logging_level' in self.crawl_config:
            logging_level = int(self.crawl_config['logging_level'])
        else:
            logging_level = 3
        templated_dict = crawl.start(self.crawl_config,
                                     NewsCrawler,
                                     self.save_data,
                                     self.save_bulk_data,
                                     logging_level=logging_level,
                                     cache=self.cache)

        print('saving template..')
        self.save_template_dict(templated_dict)
        print('crawling/scraping', self.project_name, self.plugin_name, "DONE")
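As an aside, the logging-level lookup in this example collapses into a single dict.get call with the same default of 3:

        # equivalent to the if/else above: fall back to level 3 when the key is absent
        logging_level = int(self.crawl_config.get('logging_level', 3))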
Example #2
    def run(self, delete_existing_documents=False):
        # get default plugin
        print("getting crawl plugin info")
        self.crawl_config = self.get_default_plugin()

        # apply this specific plugin
        self.crawl_config.update(self.get_specific_plugin())

        # delete existing documents
        if delete_existing_documents:
            print("deleting documents before crawling")
            self.delete_existing_documents()
            seen_urls = []
        # add the already visited urls to the config
        else:
            print("getting seen urls")
            seen_urls = self.get_seen_urls()
        self.crawl_config['seen_urls'] = seen_urls

        print("getting template dict")
        # add the template for this crawl_plugin to the scraping config
        self.crawl_config['template_dict'] = self.get_template_dict()

        print("starting crawl")
        # the save-data callbacks and the NewsCrawler are passed to crawl.start separately
        if 'logging_level' in self.crawl_config:
            logging_level = int(self.crawl_config['logging_level'])
        else:
            logging_level = 3
        templated_dict = crawl.start(
            self.crawl_config, NewsCrawler, self.save_data, self.save_bulk_data,
            logging_level=logging_level, cache=self.cache)

        print('saving template..')
        self.save_template_dict(templated_dict)
        print('crawling/scraping', self.project_name, self.plugin_name, "DONE")
Example #3
    def start_crawl(self):
        crawl.start(self.crawl_config, cache=self.cache)
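This one-liner only makes sense on an object that already holds a crawl_config and a cache. A minimal hypothetical skeleton for such a class (only the attribute names and the crawl.start call come from these examples; the class name and constructor are assumptions):

# hypothetical wrapper; `crawl` is the same module used throughout these examples
class CrawlJob:
    def __init__(self, crawl_config, cache=None):
        self.crawl_config = crawl_config
        self.cache = cache

    def start_crawl(self):
        crawl.start(self.crawl_config, cache=self.cache)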
Example #4
    ],

    'index_filter_regexps': [

    ],

    'index_required_regexps': [
        '2015', '2014'
    ],

    'max_saved_responses': 100,

    'max_workers': 10,
})

crawl.start(CRAWL_CONFIG)

# Indexing

SCRAPE_CONFIG = CRAWL_CONFIG.copy()

SCRAPE_CONFIG.update({
    'template_proportion': 0.09,
    'max_templates': 1000
})

skindex = Scraper(SCRAPE_CONFIG)

skindex.load_local_pages()
skindex.add_template_elements()
Example #5
# Crawling
# copy so the shared default config is not mutated by the update below
CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
CRAWL_CONFIG.update({
    'seed_urls': ['http://www.techcrunch.com/'],
    'collections_path': '/Users/pascal/egoroot/sky_collections',
    'collection_name': 'techie',

    # Optional
    'crawl_filter_regexps': [],
    'crawl_required_regexps': ['2015', '2014'],
    'index_filter_regexps': [],
    'index_required_regexps': ['2015', '2014'],
    'max_saved_responses': 100,
    'max_workers': 10,
})

crawl.start(CRAWL_CONFIG)

# Indexing

SCRAPE_CONFIG = CRAWL_CONFIG.copy()

SCRAPE_CONFIG.update({'template_proportion': 0.09, 'max_templates': 1000})

skindex = Scraper(SCRAPE_CONFIG)

skindex.load_local_pages()
skindex.add_template_elements()

res = skindex.process_all(remove_visuals=True)
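Judging by the view handlers further down, process_all returns a mapping from URL to the fields extracted for that page. A minimal sketch of walking over the result (the 'images' key is taken from those handlers):

for url, doc in res.items():
    # each value is a dict of extracted fields, e.g. a list of image URLs
    print(url, len(doc.get('images', [])), 'images')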
Example #6
File: view.py Project: bgarrels/sky
    def post(self):
        # copy so the module-level default is not mutated on every request
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
        CRAWL_CONFIG.update({
            'collections_path': os.path.join(os.path.expanduser('~'), 'sky_collections/'),
            # 'max_workers': 10,
        })
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode('utf8')
            if value and arg != 'url' and arg != 'checkboxcache':
                print('pre', arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else [value]
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value
                print('post', arg, CRAWL_CONFIG[arg])

        url = self.get_argument('url', '')

        use_cache = self.get_argument('checkboxcache', '')

        domain = extractDomain(url)
        CRAWL_CONFIG['seed_urls'] = [url]
        CRAWL_CONFIG['collection_name'] = domain[7:]

        if use_cache != 'on':

            col_path = os.path.join(CRAWL_CONFIG['collections_path'],
                                    CRAWL_CONFIG['collection_name'])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({
            'template_proportion': 0.4,
            'max_templates': 100
        })

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True,
                                  maxn=CRAWL_CONFIG['max_saved_responses'])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG['max_saved_responses']:
                break
            dc = res[url]
            dc['url'] = url
            dc['source_name'] = domain
            dc['images'] = [x for x in reversed(dc['images'][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print('num unique images', len(get_image_set({x['url']: x for x in items})))

        if items and 'money' in items[0]:
            items = sorted(items, key=lambda x: len(x['money']), reverse=True)

        self.render('page_template.html', items=items, cached=False)
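The handler above leans on two helpers, is_numeric and extractDomain, whose implementations are not shown here. Rough stand-ins, inferred only from how they are called (the project's real helpers may well differ):

from urllib.parse import urlsplit

def is_numeric(value):
    # hypothetical: True when the submitted form value parses as an int
    try:
        int(value)
        return True
    except ValueError:
        return False

def extractDomain(url):
    # hypothetical: keep scheme + host, so that domain[7:] strips 'http://'
    parts = urlsplit(url if '://' in url else 'http://' + url)
    return parts.scheme + '://' + parts.netloc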
Example #7
    def post(self):
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
        CRAWL_CONFIG.update(
            {
                "collections_path": os.path.join(os.path.expanduser("~"), "sky_view_collections/"),
                # 'max_workers': 10,
            }
        )
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode("utf8")
            if value and arg != "url" and arg != "checkboxcache":
                print("pre", arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else value.split(", ")
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value.split(", ")[0]
                print("post", arg, CRAWL_CONFIG[arg])

        url = self.get_argument("url", "")

        use_cache = self.get_argument("checkboxcache", "")

        domain = extractDomain(url)
        CRAWL_CONFIG["seed_urls"] = [url]
        CRAWL_CONFIG["collection_name"] = domain[7:]

        if use_cache != "on":

            col_path = os.path.join(CRAWL_CONFIG["collections_path"], CRAWL_CONFIG["collection_name"])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({"template_proportion": 0.4, "max_templates": 100})

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True, maxn=CRAWL_CONFIG["max_saved_responses"])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG["max_saved_responses"]:
                break
            dc = res[url]
            dc["url"] = url
            dc["source_name"] = domain
            dc["images"] = [x for x in reversed(dc["images"][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print("num unique images", len(get_image_set({x["url"]: x for x in items})))

        if items and "money" in items[0]:
            items = sorted(items, key=lambda x: len(x["money"]), reverse=True)

        self.render("page_template.html", items=items, cached=False)
Example #8
    def post(self):
        CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG.copy()
        CRAWL_CONFIG.update({
            'collections_path': os.path.join(os.path.expanduser('~'), 'sky_view_collections/'),
            # 'max_workers': 10,
        })
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode('utf8')
            if value and arg != 'url' and arg != 'checkboxcache':
                print('pre', arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else value.split(', ')
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value.split(', ')[0]
                print('post', arg, CRAWL_CONFIG[arg])

        url = self.get_argument('url', '')

        use_cache = self.get_argument('checkboxcache', '')

        domain = extractDomain(url)
        CRAWL_CONFIG['seed_urls'] = [url]
        if domain.startswith("http"):
            CRAWL_CONFIG['collection_name'] = domain.split("/")[2]
        else:
            CRAWL_CONFIG['collection_name'] = domain.split("/")[0]

        if use_cache != 'on':

            col_path = os.path.join(CRAWL_CONFIG['collections_path'],
                                    CRAWL_CONFIG['collection_name'])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)

            crawl.start(CRAWL_CONFIG)

        SCRAPE_CONFIG = CRAWL_CONFIG.copy()

        SCRAPE_CONFIG.update({
            'template_proportion': 0.4,
            'max_templates': 100,
        })

        skindex = Scraper(SCRAPE_CONFIG)

        skindex.load_local_pages()
        skindex.add_template_elements()

        res = skindex.process_all(remove_visuals=True,
                                  maxn=CRAWL_CONFIG['max_saved_responses'])

        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG['max_saved_responses']:
                break
            dc = res[url]
            dc['url'] = url
            dc['source_name'] = domain
            dc['images'] = [x for x in reversed(dc['images'][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)

        # this is quite out of place like this
        print('num unique images', len(get_image_set({x['url']: x for x in items})))

        if items and 'money' in items[0]:
            items = sorted(items, key=lambda x: len(x['money']), reverse=True)

        self.render('page_template.html', items=items, cached=False)
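Finally, the 'num unique images' print in these handlers relies on a get_image_set helper. A rough guess at its behaviour, based only on how it is called here (again, the project's real implementation may differ):

def get_image_set(url_to_doc):
    # hypothetical: the distinct image URLs across all scraped documents
    return {img for doc in url_to_doc.values() for img in doc.get('images', [])}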