def supports_fetching_council_members():
    """Smoke test: the phila.legistar.com endpoint yields at least one council member.

    Fails the test (via fail()) when the scraper's member iterator is empty.
    """
    config = Config(hostname="phila.legistar.com", fulltext=True).defaults(DEFAULT_CONFIG)
    scraper = LegistarScraper(config)
    members = scraper.councilMembers()
    try:
        # Use the next() builtin (Python 2.6+ and 3.x) instead of the
        # iterator's .next() method, which was removed in Python 3.
        next(members)
    except StopIteration:
        fail("no council members found")
def supports_fetching_council_members():
    """Smoke test: the phila.legistar.com endpoint yields at least one council member.

    Fails the test (via fail()) when the scraper's member iterator is empty.
    """
    config = Config(hostname='phila.legistar.com', fulltext=True).defaults(DEFAULT_CONFIG)
    scraper = LegistarScraper(config)
    members = scraper.councilMembers()
    try:
        # next() builtin is portable across Python 2.6+ and 3.x; the
        # original .next() iterator method no longer exists in Python 3.
        next(members)
    except StopIteration:
        fail('no council members found')
def paging_through_council_members():
    """Verify councilMembers() pages past a single results screen.

    With link-following disabled, a2gov.legistar.com should still produce
    more than 100 member rows, which requires the scraper to paginate.
    """
    cfg = Config(hostname="a2gov.legistar.com", fulltext=False).defaults(DEFAULT_CONFIG)
    scraper = LegistarScraper(cfg)
    all_rows = [row for row in scraper.councilMembers(follow_links=False)]
    assert_greater(len(all_rows), 100)
def paging_through_council_members():
    """Check that the member listing spans multiple result pages.

    More than 100 rows from a2gov.legistar.com implies pagination worked,
    since a single page cannot hold that many entries.
    """
    settings = Config(hostname='a2gov.legistar.com', fulltext=False).defaults(DEFAULT_CONFIG)
    collected = list(LegistarScraper(settings).councilMembers(follow_links=False))
    member_count = len(collected)
    assert_greater(member_count, 100)
writer.writerow(csv_headers) for scraper_target in settings.SCRAPER_TARGETS: print "Municipality:", scraper_target['municipality'] print "Endpoint:", scraper_target['endpoint'] config = Config( hostname = scraper_target['endpoint'], fulltext=True, ).defaults(DEFAULT_CONFIG) scraper = LegistarScraper(config) # get all agenda items members_list = scraper.councilMembers(follow_links=False) try: for member in members_list: member['Municipality'] = scraper_target['municipality'] # member['Created Date'] = datetime.datetime.utcnow() row = [] for col in csv_headers: extracted_val = '' if col in member.keys(): if type(member[col]) == dict: extracted_val = member[col]['label'] else: extracted_val = member[col]