Example #1
    def identify_bio_pages(self, firm):
        # The scrape cache is keyed by the firm's training-set id.
        fts = FirmTrainingSet.get_for_firm(firm)
        spider = Spider(firm.domain, os.path.join(self.scrape_dir, str(fts.id) + ".db"), workers=4, retry_attempts=2)

        self.stdout.write("Identifying bio pages...\n")

        model = PageClassifier(os.path.join(self.model_dir, str(fts.id) + "_page.tgm"))
        model.load()

        self.stdout.write('Retrieving page features...\n')

        # Classify each crawled page and keep the URLs predicted to be bios.
        bio_pages = []
        for url in spider.urls:
            page = spider.get(url)
            cat = str(model.predict(page))
            if cat == 'bio':
                self.stdout.write(' * ' + url + '\n')
                bio_pages.append(url)

        self.stdout.write('Done.\n')

        return bio_pages
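
A minimal sketch of how this method might be driven from the command's handle(); the Firm lookup by a --domain option is an assumption for illustration and not part of the original example.

    def handle(self, *args, **options):
        # Hypothetical: resolve the target firm from a --domain argument.
        firm = Firm.objects.get(domain=options["domain"])
        bio_pages = self.identify_bio_pages(firm)
        self.stdout.write("%d bio pages identified\n" % len(bio_pages))
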
Example #2
    def identify_bio_elements(self, firm, urls):
        fts = FirmTrainingSet.get_for_firm(firm)
        # Points at the same per-training-set scrape cache as Example #1.
        spider = Spider(firm.domain, os.path.join(self.scrape_dir, str(fts.id) + ".db"), workers=4, retry_attempts=2)

        self.stdout.write("Identifying bio elements...\n")

        model = ElementClassifier(os.path.join(settings.MODEL_DIR, 'model', str(fts.id) + "_element.tgm"))
        model.load()

        self.stdout.write('Retrieving element features...\n')

        elements_out = []
        for url in urls:
            page = spider.get(url)
            # Extract the bio elements from the page and record the source URL.
            out_data = model.extract(page, format="html")
            out_data['url'] = url

            # Coerce to a plain dict before serializing.
            out_data = dict(out_data)
            self.stdout.write(json.dumps(out_data, indent=4))
            elements_out.append(out_data)

        self.stdout.write('Done.\n')

        return elements_out
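
Extending the handle() sketch above, the two examples chain naturally: the URL list returned by identify_bio_pages is the urls argument of identify_bio_elements. This assumes both methods live on the same management command class; the JSON dump at the end is illustrative only.

    def handle(self, *args, **options):
        firm = Firm.objects.get(domain=options["domain"])         # assumed lookup
        bio_pages = self.identify_bio_pages(firm)                 # Example #1
        elements = self.identify_bio_elements(firm, bio_pages)    # Example #2
        # Illustrative only: emit all extracted bio data as one JSON document.
        self.stdout.write(json.dumps(elements, indent=4))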