def test_extend_empty():
    """Extending an empty table with an empty iterable is a no-op."""
    accounts = SQL('sqlite:///:memory:', 'accounts',
                   schema='{name: string, amount: int}')
    assert list(accounts) == []
    accounts.extend([])
    assert list(accounts) == []
def test_schema_detection_from_file():
    """Re-opening a table on disk without a schema reflects the stored one.

    Renamed from ``test_schema_detection`` — a second function with that
    name later in the module shadowed this one, so pytest never ran it.
    The on-disk ``my.db`` is now removed in a ``finally`` so the test
    leaves no artifact even when the assertion fails.
    """
    dd = SQL('sqlite:///my.db', 'accounts',
             schema='{name: string, amount: int32}')
    try:
        dd.extend([['Alice', 100], ['Bob', 200]])
        # Open the same table with no explicit schema: it must be
        # discovered from the database itself.
        dd2 = SQL('sqlite:///my.db', 'accounts')
        assert dd.schema == dd2.schema
    finally:
        # Always clean up the database file created above.
        if os.path.isfile('my.db'):
            os.remove('my.db')
def test_inconsistent_schemas(self):
    """Reading a table through a descriptor with a different schema
    coerces the stored string values to the declared types."""
    source = SQL('sqlite:///:memory:', 'badtable',
                 schema='{name: string, amount: string}')
    source.extend([('Alice', '100'), ('Bob', '200')])

    # Same table, but amounts declared as ints: values come back coerced.
    retyped = SQL(source.engine, 'badtable',
                  schema='{name: string, amount: int}')
    assert list(retyped) == [('Alice', 100), ('Bob', 200)]
def test_schema_detection():
    """Schema discovery round-trips through an existing engine.

    The trailing removal of ``my.db`` (cleanup after a sibling test that
    writes to disk) previously ran only when the assertion passed; it is
    now in a ``finally`` so it always executes.
    """
    engine = sa.create_engine('sqlite:///:memory:')
    dd = SQL(engine, 'accounts', schema='{name: string, amount: int32}')
    dd.extend([['Alice', 100], ['Bob', 200]])
    try:
        # Re-open the same table without a schema; it must be reflected.
        dd2 = SQL(engine, 'accounts')
        assert dd.schema == dd2.schema
    finally:
        # Defensive cleanup of an on-disk database another test may have
        # left behind; run even if the assertion above fails.
        if os.path.isfile('my.db'):
            os.remove('my.db')
def test_chunks(self):
    """extend_chunks loads a dynd chunk and chunks() re-yields in blocks.

    Uses ``assertEqual`` — ``assertEquals`` is a deprecated alias that
    was removed in Python 3.12.
    """
    schema = '{name: string, amount: int32}'
    dd = SQL(self.engine, 'testtable3', schema=schema, primary_key='name')

    data_list = [('Alice', 100), ('Bob', 50), ('Charlie', 200)]
    data_dict = [{'name': name, 'amount': amount}
                 for name, amount in data_list]

    chunk = nd.array(data_list, dtype=str(dd.dshape))
    dd.extend_chunks([chunk])

    # Backends may yield rows as tuples or as dicts; accept either.
    assert list(dd) == data_list or list(dd) == data_dict
    # Three rows with a block length of 2 -> two chunks.
    self.assertEqual(len(list(dd.chunks(blen=2))), 2)
def test_discovery_engine():
    """discover() on the underlying engine matches the descriptor's dshape."""
    dd = SQL('sqlite:///:memory:', 'accounts',
             schema='{name: string, amount: int}')
    discovered = discover(dd.engine, 'accounts')
    assert discovered == dd.dshape
def test_extension(self):
    """extend() with dicts inserts rows readable in every representation.

    Uses ``assertEqual`` — ``assertEquals`` is a deprecated alias that
    was removed in Python 3.12.
    """
    dd = SQL(self.engine, 'testtable2',
             schema='{name: string, amount: int32}', primary_key='name')

    data_list = [('Alice', 100), ('Bob', 50)]
    data_dict = [{'name': name, 'amount': amount}
                 for name, amount in data_list]
    dd.extend(data_dict)

    # Rows must be visible through a raw connection ...
    with self.engine.connect() as conn:
        results = conn.execute('select * from testtable2')
        self.assertEqual(list(results), data_list)

    # ... and through the descriptor itself, as tuples or dicts.
    assert list(iter(dd)) == data_list or list(iter(dd)) == data_dict
    assert (dd.as_py() == tuple(map(tuple, data_list))
            or dd.as_py() == data_dict)
def test_indexing(self):
    """Fancy indexing: columns by name or position, row slices, full table."""
    dd = SQL(self.engine, 'testtable',
             schema='{name: string, amount: int, id: int}',
             primary_key='id')
    rows = [('Alice', 100, 1), ('Bob', 50, 2), ('Charlie', 200, 3)]
    dd.extend(rows)

    # Column selection by name.
    self.assertEqual(set(dd[:, ['id', 'name']]),
                     set(((1, 'Alice'), (2, 'Bob'), (3, 'Charlie'))))
    self.assertEqual(set(dd[:, 'name']), set(('Alice', 'Bob', 'Charlie')))
    assert dd[0, 'name'] in ('Alice', 'Bob', 'Charlie')

    # Positional column selection agrees with name-based selection.
    self.assertEqual(set(dd[:, 0]), set(dd[:, 'name']))
    self.assertEqual(set(dd[:, [1, 0]]), set(dd[:, ['amount', 'name']]))

    # Row slicing and full-table views.
    self.assertEqual(len(list(dd[:2, 'name'])), 2)
    self.assertEqual(set(dd[:, :]), set(rows))
    self.assertEqual(set(dd[:, :2]), set(dd[:, ['name', 'amount']]))
    self.assertEqual(set(dd[:]), set(dd[:, :]))
    assert dd[0] in rows
def test_csv_gzip_into_sql():
    """into() copies every row from a gzipped CSV into a SQL descriptor."""
    from blaze.data.csv import CSV
    from blaze.data.sql import into

    engine = sa.create_engine('sqlite:///:memory:')
    target = SQL(engine, 'accounts', schema='{name: string, amount: int32}')
    with filetext(b'Alice,2\nBob,4', extension='csv.gz',
                  open=gzip.open, mode='wb') as path:
        source = CSV(path, schema=target.schema)
        into(target, source)
        assert list(target) == list(source)
def test_setup_with_uri(self):
    """Constructing a SQL descriptor from a URI string must not raise."""
    SQL('sqlite:///:memory:', 'accounts',
        schema='{name: string, amount: int}')