def test_no_rows_second_insert(self):
    "Nothing happens if no rows are inserted into an existing table."
    dt = DumpTruck(dbname="/tmp/test.db")
    dt.create_table({"foo": "uhtnh", "bar": "aoue"}, "ninety")
    dt.insert([], "ninety")
    c = dt.execute("select count(*) as c from ninety")[0]["c"]
    dt.close()
    self.assertEqual(c, 0)
def test_empty_row_second_insert(self):
    "An empty row acts like any other row."
    dt = DumpTruck(dbname="/tmp/test.db")
    dt.create_table({"foo": "uhtnh", "bar": "aoue"}, "nine")
    dt.insert({}, "nine")
    c = dt.execute("select count(*) as c from nine")[0]["c"]
    dt.close()
    self.assertEqual(c, 1)
def test_second_insert(self):
    "Inserting a second row that is all null adds an empty row."
    dt = DumpTruck(dbname='/tmp/test.db')
    dt.create_table({'foo': 'uhtnh', 'bar': 'aoue'}, 'three')
    dt.insert({'foo': None, 'bar': None}, 'three')
    c = dt.execute('select count(*) as c from three')[0]['c']
    dt.close()
    self.assertEqual(c, 1)
def apis():
    dt = DumpTruck('/tmp/open-data.sqlite', auto_commit=False)
    dt.create_table({'catalog': 'abc.def'}, 'socrata_apis')
    dt.create_index(['catalog'], 'socrata_apis', unique=True, if_not_exists=True)
    socrata_catalogs = filter(lambda x: x[0] == 'socrata', catalogs())
    for _, catalog in socrata_catalogs:
        dt.upsert({
            'catalog': catalog.split('://')[-1],
            'apis': count_apis(catalog),
        }, 'socrata_apis')
def main():
    edges = build_network()['edges']
    dt = DumpTruck(dbname='/tmp/open-data.sqlite', adapt_and_convert=True)
    datasets_in = dt.execute('SELECT * FROM socrata')
    dt.create_table({'id': 'blah-blah'}, 'socrata_deduplicated')
    dt.create_index(['id'], 'socrata_deduplicated', if_not_exists=True, unique=True)
    for dataset in dedupe(datasets_in, edges):
        dt.upsert(dataset, 'socrata_deduplicated')
def test_create_table(self):
    h = DumpTruck(dbname='/tmp/test.db')
    h.create_table({'foo': 0, 'bar': 1, 'baz': 2}, 'zombies')
    h.close()
    connection = sqlite3.connect('/tmp/test.db')
    cursor = connection.cursor()
    cursor.execute('SELECT foo, bar, baz FROM zombies')
    observed = cursor.fetchall()
    connection.close()
    expected = []
    self.assertListEqual(observed, expected)
def extract_dataset_table_info():
    dt = DumpTruck(dbname='/tmp/table_info.db')
    dt.create_table({'portal': 'abc', 'id': 'abcd-efgh'}, 'table_info')
    dt.create_index(['portal', 'id'], 'table_info', unique=True)
    dt.create_index(['tableId'], 'table_info', unique=False)
    # (portal, id) pairs already recorded, so finished views can be skipped.
    done = set(tuple(row.values()) for row in dt.execute('SELECT portal, id FROM table_info'))
    for portal in os.listdir('data'):
        for viewid in os.listdir(os.path.join('data', portal, 'views')):
            if (portal, viewid) in done:
                continue
            d = _dataset_table_info(portal, viewid)
            if d is None:
                continue
            dt.upsert(d, 'table_info')
def users():
    dt = DumpTruck(dbname='/tmp/socrata.db')
    dt.create_table({'id': 'abcd-efgh'}, 'user')
    dt.create_index(['id'], 'user', unique=True)
    _users = {}
    for portal in os.listdir('data'):
        for viewid in os.listdir(os.path.join('data', portal, 'views')):
            handle = open(os.path.join('data', portal, 'views', viewid), 'r')
            try:
                view = json.load(handle)
            except ValueError:
                # Skip files that do not contain valid JSON.
                continue
            handle.close()
            if view['owner']['id'] in _users:
                _users[view['owner']['id']]['views'].add(view['id'])
                try:
                    _users[view['owner']['id']]['publicationDates'].add((view['id'], view['publicationDate']))
                except KeyError:
                    # Surface a view that lacks a publicationDate.
                    return view
            else:
                _users[view['owner']['id']] = view['owner']
                _users[view['owner']['id']]['views'] = {view['id']}
                _users[view['owner']['id']]['tables'] = set()
                _users[view['owner']['id']]['publicationDates'] = set()
            if view['tableAuthor']['id'] in _users:
                _users[view['tableAuthor']['id']]['tables'].add(view['tableId'])
            else:
                _users[view['tableAuthor']['id']] = view['tableAuthor']
                _users[view['tableAuthor']['id']]['views'] = set()
                _users[view['tableAuthor']['id']]['tables'] = {view['tableId']}
                _users[view['tableAuthor']['id']]['publicationDates'] = set()
    # Replace the set-valued fields with counts before inserting.
    for uid in _users.keys():
        for key in ['views', 'rights', 'tables']:
            if key in _users[uid]:
                _users[uid]['n_' + key] = len(_users[uid][key])
                del _users[uid][key]
    dt.insert(_users.values(), 'user')
    for uid, user in _users.items():
        for viewid, publicationDate in user['publicationDates']:
            dt.insert({'userid': user['id'], 'viewid': viewid, 'publicationDate': publicationDate},
                      'publications', commit=False)
    dt.commit()
    return _users
def get_links(softwares=['ckan', 'socrata']):
    dt = DumpTruck('/tmp/open-data.sqlite')
    dummyrow = dict(zip(
        ['software', 'catalog', 'identifier', 'status_code', 'headers', 'error'],
        (['blah'] * 3) + ([234] * 1) + ([{'a': 'b'}] * 2)))
    dt.create_table(dummyrow, 'links', if_not_exists=True)
    dt.create_index(['software', 'catalog', 'identifier'], 'links', if_not_exists=True, unique=True)
    for software in softwares:
        for catalog in read.catalogs(software):
            if SOCRATA_FIX.get(catalog, 'this is a string, not None') is None:
                continue
            try:
                for row in _check_catalog(software, catalog):
                    dt.upsert(row, 'links')
            except:
                print(os.path.join('downloads', software, catalog))
                raise
def to_sqlite3():
    dt = DumpTruck('/tmp/open-data.sqlite', auto_commit=False)
    dummyrow = dict(zip(['software', 'catalog', 'identifier'], ['blah'] * 3))
    dt.create_table(dummyrow, 'datasets', if_not_exists=True)
    dt.create_index(['software', 'catalog', 'identifier'], 'datasets', if_not_exists=True, unique=True)
    for table in ['ckan', 'socrata']:
        dt.create_table({'catalog': 'blah', 'identifier': 'blah'}, table, if_not_exists=True)
        dt.create_index(['catalog', 'identifier'], table, if_not_exists=True, unique=True)
    dt.create_table({'view_id': 'abc', 'table_id': 123}, 'socrata_tables')
    dt.create_index(['view_id'], 'socrata_tables', if_not_exists=True, unique=True)
    dt.create_index(['table_id'], 'socrata_tables', if_not_exists=True)
    for dataset in datasets():
        row = {
            'software': dataset['software'],
            'catalog': dataset['catalog'],
            'identifier': dataset[SOFTWARE_MAP['identifier'][dataset['software']]],
        }
        sql = 'SELECT * FROM datasets WHERE software = ? AND catalog = ? AND identifier = ?'
        if dt.execute(sql, [row['software'], row['catalog'], row['identifier']]) != []:
            continue
        dt.upsert(row, 'datasets')
        if dataset['software'] == 'socrata':
            socrata_table = {
                'view_id': row['identifier'],
                'table_id': dataset['tableId'],
            }
            dt.upsert(socrata_table, 'socrata_tables')
        dt.upsert(dataset, dataset['software'])
    dt.commit()
def test_if_exists(self):
    dt = DumpTruck(dbname="/tmp/test.db")
    dt.create_table({"foo": "bar"}, "zort")
    with self.assertRaises(sqlite3.OperationalError):
        dt.create_table({"foo": "bar"}, "zort", error_if_exists=True)
def test_create_table(self): "The first row must have a non-null value so the schema can be defined." dt = DumpTruck(dbname = '/tmp/test.db') with self.assertRaises(ValueError): dt.create_table({'foo': None, 'bar': None}, 'one') dt.close()
def test_bad_insert(self):
    dt = DumpTruck(dbname='/tmp/test.db')
    with self.assertRaises(ValueError):
        dt.create_table({'a': 'b', 'A': 'B'}, 'foo')
def test_if_not_exists(self):
    dt = DumpTruck(dbname='/tmp/test.db')
    dt.create_table({'foo': 'bar'}, 'baz')
    dt.create_table({'foo': 'bar'}, 'baz', error_if_exists=False)
import urlparse
import json

import requests
import lxml.html

import datetool
from dumptruck import DumpTruck
from common import dumptruck_to_csv

URL = "http://www.scotlandoffice.gov.uk/scotlandoffice/14463.363.html"

req = requests.get(URL)
doc = lxml.html.fromstring(req.text)

dt = DumpTruck(dbname="scotland.db")
dt.create_table({"Title": "", "Publication date": "", "Old URL": "", "Body": "",
                 "Attachments": "", "Associated organisations": "",
                 "Associated Document Series": ""}, "statistics")
dt.create_index(["Title", "Old URL"], "statistics", unique=True)

for link in doc.xpath("//div[@class='wrapper']/ul/li/a"):
    series_title, series_url = link.text, urlparse.urljoin(URL, link.attrib["href"])
    print series_title
    series_req = requests.get(series_url)
    series_doc = lxml.html.fromstring(series_req.text)
    for table_line in series_doc.xpath("//tr[not(@bgcolor) or @bgcolor!='#004093']"):
        file_pub_date = table_line.xpath("./td[3]")[0].text
def test_no_rows_create_table(self):
    "The insert must have a row so the schema can be defined."
    dt = DumpTruck(dbname='/tmp/test.db')
    with self.assertRaises(ValueError):
        dt.create_table([], 'two')
    dt.close()
import os

# Assumption: parse and tostring are lxml.html's, since they are used
# below without an import of their own.
from lxml.html import parse, tostring
from dumptruck import DumpTruck

import finalip_lib as l


def read_finalip(path):
    html = parse(path)
    trs = html.xpath('//table[@style="border-collapse: collapse; width: 100%;"]/descendant::tr')

    def do_row(tr):
        try:
            return l.parse_row(tr)
        except:
            # Print the offending row before re-raising so it can be inspected.
            print tostring(tr)
            raise

    return map(do_row, trs[2:])

# Schema
dt = DumpTruck(dbname='/tmp/finalip.db')
dt.create_table({u'DA Number': u'NAE-2009-01067'}, 'finalip', if_not_exists=True)
dt.create_index(['Da Number'], 'finalip', unique=True, if_not_exists=True)

# Skip finished stuff
pages = set((row['Year'], row['Month'], row['Page'])
            for row in dt.execute('SELECT Year, Month, Page FROM finalip'))

# Populate
for dirname, subdirnames, filenames in os.walk(
        os.path.join(os.environ['READER_ROOT'], '..', 'finalips')):
    if subdirnames != []:
        continue
    for filename in filenames:
        year, month = map(int, dirname.split('/')[-2:])
        page = (year, month, filename)
        if page in pages:
            continue
if config:
    log.debug("Config loaded")

# DumpTruck initialization
dump_truck = DumpTruck(dbname=os.path.expanduser("~/.bakthat.dt"), vars_table="config")

if "backups" not in dump_truck.tables():
    # We initialize DumpTruck with dummy data that won't be inserted.
    dump_truck.create_table(
        {
            "stored_filename": "filename.20130227205616.tgz",
            "size": 1,
            "metadata": {"is_enc": False},
            "backup_date": 1361994976,
            "filename": "filename",
            "backend": "s3",
            "is_deleted": False,
            "last_updated": 1361994976,
            "tags": [],
            "backend_hash": "backendhash",
        },
        "backups",
    )
    dump_truck.create_index(["stored_filename"], "backups", unique=True)

if "inventory" not in dump_truck.tables():
    dump_truck.create_table({"filename": "filename", "archive_id": "glacier-archive-id"}, "inventory")
    dump_truck.create_index(["filename"], "inventory", unique=True)

if "jobs" not in dump_truck.tables():
    dump_truck.create_table({"filename": "filename", "job_id": "job_id"}, "jobs")
def main():
    dt = DumpTruck(dbname='metrics.db')
    dt.create_table({'portal': 'abc', 'date': datetime.date.today()}, 'series')
    dt.create_index(['portal', 'date'], 'series')
    dt.upsert(list(table()), 'series')
def test_empty_row_create_table(self):
    "The schema row must have a non-null value."
    dt = DumpTruck(dbname='/tmp/test.db')
    with self.assertRaises(ValueError):
        dt.create_table({}, 'two')
    dt.close()
    'http://www.nwo.usace.army.mil',
    'http://www.nws.usace.army.mil',
    'http://www.nww.usace.army.mil',
    'http://www.pof.usace.army.mil',
    'http://www.poj.usace.army.mil',
    'http://www.saw.usace.army.mil',
    'http://www.spa.usace.army.mil',
    'http://www.spk.usace.army.mil',
    'http://www.spl.usace.army.mil',
    'http://www.swf.usace.army.mil',
    'http://www.swg.usace.army.mil',
    'http://www.tam.usace.army.mil',
}

if __name__ == '__main__':
    dt = DumpTruck(dbname='usace.db')
    dt.create_table({'permit_application_number': 'abcd'}, 'notice')
    dt.create_index(['permit_application_number'], 'notice')
    for division in parse.locations(get('http://www.usace.army.mil/Locations.aspx')):
        for district in division['districts']:
            # Reduce the district link to its bare domain.
            domain = re.sub(r'\.usace\.army\.mil.*$', '.usace.army.mil', district['href'])
            path = '/Missions/Regulatory/PublicNotices.aspx'
            if domain in SKIPPED_DISTRICTS:
                continue
            pn_list = None
            while pn_list is None or pn_list['last_page'] > pn_list['current_page']:
                pn_list = parse.public_notice_list(get(domain + path))
                dt.upsert(list(pn_list['notices']), 'notice')
["http://www.scotlandoffice.gov.uk/scotlandoffice/16668.141.html?tID=16677&mon=jul", "Latest releases"], ["http://www.scotlandoffice.gov.uk/scotlandoffice/16668.141.html?tID=16676&mon=aug", "Latest releases"], ["http://www.scotlandoffice.gov.uk/scotlandoffice/16668.141.html?tID=16678&mon=sep", "Latest releases"], ["http://www.scotlandoffice.gov.uk/scotlandoffice/16668.141.html?tID=16679&mon=oct", "Latest releases"], ["http://www.scotlandoffice.gov.uk/scotlandoffice/16668.141.html?tID=16680&mon=nov", "Latest releases"], ["http://www.scotlandoffice.gov.uk/scotlandoffice/10804.146.html", "Archive releases"], # 2005 ["http://www.scotlandoffice.gov.uk/scotlandoffice/10805.145.html", "Archive releases"], # 2006 ["http://www.scotlandoffice.gov.uk/scotlandoffice/10806.144.html", "Archive releases"], # 2007 ["http://www.scotlandoffice.gov.uk/scotlandoffice/10807.143.html", "Archive releases"], # 2008 ["http://www.scotlandoffice.gov.uk/scotlandoffice/13342.html", "Archive releases"], # 2009 ["http://www.scotlandoffice.gov.uk/scotlandoffice/13661.html", "Archive releases"], # 2010 ["http://www.scotlandoffice.gov.uk/scotlandoffice/15263.html", "Archive releases"], # 2011 ] dt = DumpTruck(dbname="scotland.db") dt.create_table({"Title": "", "Publication date": "", "Old URL": "", "Summary": "", "Attachments": "", "Type": "", "Associated organisations": ""}, "publications") dt.create_index(["Title", "Old URL"], "publications", unique=True) for url, page_type in URLS: for publication in scrape_list_page(url): publication['Type'] = page_type dt.upsert(publication, "publications") dumptruck_to_csv(dt, "publications", "/home/http/scotland/publications.csv")
def test_create_table(self): "The first row must have a non-null value so the schema can be defined." dt = DumpTruck(dbname="/tmp/test.db") with self.assertRaises(ValueError): dt.create_table({"foo": None, "bar": None}, "one") dt.close()