def test_remove_all_keywords(self):
    """All previously added keywords can be removed in one call."""
    service = Burst.run(Config())
    dataset = self._make_stub_keyword_dataset()
    # Drain the generator so every keyword is actually registered.
    for _idx, _outcome in service.add_keyword(dataset):
        pass
    self.assertEqual(service.remove_all_keywords(), True)
    service.stop()
def test_get_results(self):
    """Querying results for a keyword does not raise."""
    service = Burst.run(Config())
    service.get_result('keyword')
    service.stop()
def test_add_documents(self):
    """Each document pushed via add_documents reports a count of 1."""
    service = Burst.run(Config())
    dataset = self._make_stub_document_dataset()
    for _idx, count in service.add_documents(dataset):
        self.assertEqual(count, 1)
    service.stop()
def test_add_keyword(self):
    """Each keyword pushed via add_keyword is accepted (returns True)."""
    service = Burst.run(Config())
    dataset = self._make_stub_keyword_dataset()
    for _idx, accepted in service.add_keyword(dataset):
        self.assertEqual(accepted, True)
    service.stop()
def test_embedded(self):
    """The service starts and stops cleanly in embedded mode."""
    service = Burst.run(Config(), embedded=True)
    service.stop()
def test_get_all_bursted_results_at(self):
    """A non-numeric position raises ValueError; a numeric one succeeds."""
    service = Burst.run(Config())
    self.assertRaises(ValueError, service.get_all_bursted_results_at, 'hoge')
    service.get_all_bursted_results_at(10)
    service.stop()
def test_get_all_keywords(self):
    """Listing all registered keywords does not raise."""
    service = Burst.run(Config())
    service.get_all_keywords()
    service.stop()
def test_get_all_bursted_results_at(self):
    """A non-numeric position raises ValueError; a numeric one is accepted."""
    service = Burst.run(Config())
    self.assertRaises(
        ValueError, service.get_all_bursted_results_at, 'hoge')
    service.get_all_bursted_results_at(10)
    service.stop()
def test_get_all_bursted_results(self):
    """Fetching every bursted result does not raise."""
    service = Burst.run(Config())
    service.get_all_bursted_results()
    service.stop()
def test_get_result_at(self):
    """A non-numeric position raises ValueError; a numeric one succeeds."""
    service = Burst.run(Config())
    self.assertRaises(ValueError, service.get_result_at, 'keyword', 'hoge')
    service.get_result_at('keyword', 10)
    service.stop()
def test_get_result_at(self):
    """A non-numeric position raises ValueError; a numeric one is accepted."""
    service = Burst.run(Config())
    self.assertRaises(
        ValueError, service.get_result_at, 'keyword', 'hoge')
    service.get_result_at('keyword', 10)
    service.stop()
"""Example: feed keywords and documents into a Jubatus Burst service
and print the detected burst batches.

Bug fix: the original script used ``KeywordSchema`` and ``KeywordDataset``
without importing them (compare the sibling example, which imports both),
so it failed with ``NameError`` on the first use. The missing import line
is added below.
"""

from jubakit.burst import KeywordSchema, KeywordDataset
from jubakit.burst import DocumentSchema, DocumentDataset
from jubakit.burst import Burst, Config
from jubakit.loader.csv import CSVLoader

# Map CSV columns onto the keyword schema (keyword itself plus the
# scaling and gamma burst-detection parameters).
keyword_loader = CSVLoader('burst_keywords.csv')
keyword_schema = KeywordSchema({
    'keyword': KeywordSchema.KEYWORD,
    'scaling': KeywordSchema.SCALING,
    'gamma': KeywordSchema.GAMMA
})
keyword_dataset = KeywordDataset(keyword_loader, keyword_schema)

# Map CSV columns onto the document schema (position on the time axis
# and the document text to scan for keywords).
document_loader = CSVLoader('burst_documents.csv')
document_schema = DocumentSchema({
    'position': DocumentSchema.POSITION,
    'text': DocumentSchema.TEXT
})
document_dataset = DocumentDataset(document_loader, document_schema)

burst = Burst.run(Config())

# Register all keywords, then feed all documents; both APIs are
# generators, so drain them to perform the work.
for _ in burst.add_keyword(keyword_dataset):
    pass
for _ in burst.add_documents(document_dataset):
    pass

# Print one line per analyzed window for the 'burst' keyword.
for result in burst.get_result('burst').batches:
    print(result)

burst.stop()
def test_simple(self):
    """A Burst object can be constructed directly and stopped."""
    service = Burst()
    service.stop()
"""Example: run a Jubatus Burst service over CSV-loaded keywords and
documents, then print each burst-analysis batch."""

from jubakit.burst import KeywordSchema, KeywordDataset
from jubakit.burst import DocumentSchema, DocumentDataset
from jubakit.burst import Burst, Config
from jubakit.loader.csv import CSVLoader

# Build the keyword dataset: each CSV row supplies the keyword plus its
# scaling and gamma burst-detection parameters.
kw_loader = CSVLoader('burst_keywords.csv')
kw_schema = KeywordSchema({
    'keyword': KeywordSchema.KEYWORD,
    'scaling': KeywordSchema.SCALING,
    'gamma': KeywordSchema.GAMMA
})
kw_dataset = KeywordDataset(kw_loader, kw_schema)

# Build the document dataset: each CSV row supplies a time-axis position
# and the text to scan.
doc_loader = CSVLoader('burst_documents.csv')
doc_schema = DocumentSchema({
    'position': DocumentSchema.POSITION,
    'text': DocumentSchema.TEXT
})
doc_dataset = DocumentDataset(doc_loader, doc_schema)

service = Burst.run(Config())

# add_keyword / add_documents are generators; drain them so the data is
# actually pushed into the service.
for _ in service.add_keyword(kw_dataset):
    pass
for _ in service.add_documents(doc_dataset):
    pass

# Dump one line per analyzed window for the 'burst' keyword.
for batch in service.get_result('burst').batches:
    print(batch)

service.stop()