def main():
    """Run an interactive demo of an automated literature search.

    Prints the search terms, runs a co-occurrence scrape with a Count
    object, shows the results, and opens the project website.
    """

    print("\nAUTOMATED LITERATURE SEARCH\n")

    input('\n>> Press to see terms\n')

    # Terms are lists-of-lists: each inner list is a term (with any synonyms)
    terms_brain = [['theta oscillation'], ['beta oscillation']]
    terms_behav = [['memory'], ['motor']]

    print('BRAIN TERMS')
    for term in terms_brain:
        print('\t' + term[0])
    print('BEHAV TERMS')
    for term in terms_behav:
        print('\t' + term[0])

    input('\n>> Press to run scrape\n')

    # Initialize counts object and set both term lists
    counts = Count()
    counts.set_terms(terms_brain, 'A')
    counts.set_terms(terms_behav, 'B')
    counts.run_scrape(verbose=True)

    input('\n>> Press to see results\n')

    # Check the highest associations for each term
    counts.check_cooc('A')

    # Jump over to ERP-SCANR site
    input('\n>> Press to jump to project site\n')
    print("Opening ERP-SCANNER WebSite")
    try:
        # Original behavior: open specifically in Safari (macOS)
        webbrowser.get('Safari').open('http://tomdonoghue.github.io/ERP_SCANR/')
    except webbrowser.Error:
        # FIX: webbrowser.get('Safari') raises webbrowser.Error on systems
        # with no registered 'Safari' controller — fall back to the default browser
        webbrowser.open('http://tomdonoghue.github.io/ERP_SCANR/')
def test_scrape():
    """Test that the Count object successfully scrapes data."""

    count_obj = Count()

    # Set up ERPs, terms, and per-term exclusion words
    count_obj.set_erps(['N400', 'P600'])
    count_obj.set_terms(['language', 'memory'])
    count_obj.set_exclusions(['protein', 'protein'])

    count_obj.scrape_data(db='pubmed')

    # Every scraped count and percentage entry should be non-zero
    assert np.all(count_obj.dat_numbers)
    assert np.all(count_obj.dat_percent)

    check_funcs(count_obj)
def main():
    """Run scrape of counts data."""

    counts = Count()

    if TEST:
        # Small fixed term list for test runs
        counts.set_terms([['language'], ['visual']])
    else:
        counts.terms['A'].set_terms_file(TERMS_FILE)

        # Reduce number of terms
        counts.terms['A'].terms = counts.terms['A'].terms[:40]
        counts.terms['A'].labels = counts.terms['A'].labels[:40]

    print('N terms:', len(counts.terms['A'].terms))

    print('\n\nSTARTING COUNTS SCRAPE')
    print('RUNNING TERMS TYPE: ', TERMS_FILE, '\n\n')

    counts.run_scrape(verbose=True)

    print('\n\nCOUNTS SCRAPE FINISHED\n\n')

    save_pickle_obj(counts, S_NAME)

    print('\n\nCOUNTS SCRAPE SAVED\n\n')
def test_save_pickle_obj():
    """Test the save_pickle_obj function."""

    tdb = TDB()

    # Both supported object types should save without error
    for obj in (Count(), Words()):
        save_pickle_obj(obj, 'test', db=tdb)
    assert True

    # Test error checking: an unsupported object raises InconsistentDataError
    with raises(InconsistentDataError):
        save_pickle_obj(['bad dat'], 'test_bad', db=tdb)
def test_scrape():
    """Test that Count object successfully scrapes data."""

    counts = Count()
    counts.set_terms(['language', 'memory'])
    counts.set_exclusions(['protein', 'protein'])

    # NOTE: the actual scrape and its checks are disabled here
    #  (presumably to avoid a live database hit during tests):
    #counts.run_scrape(db='pubmed')
    # check_funcs(counts)
    # drop_data(counts)

    assert True
def test_count():
    """Test that a Count object initializes to a truthy instance."""

    assert Count()
###################################################################################################
#
# Count Object
# ------------
#
# There is also an OOP interface available in LISC,
# to organize the terms and data, and run scrapes.
#
# Note that the underlying code is the same - the count object ultimately calls
# the same scrape function as above.
###################################################################################################

# Initialize counts object
counts = Count()

###################################################################################################

# Set terms to run
counts.set_terms(terms_a)

###################################################################################################

# Run scrape
counts.run_scrape(verbose=True)

###################################################################################################
#
#
# The Counts object also comes with some helper methods to check out the data.
def test_erpsc_count():
    """Test the Count object."""

    # Check that initializing a Count returns a truthy object
    assert Count()