def test_update(self):
    """update() should modify the matching row and the change should persist

    The original assigned the update's row count to a variable but never
    asserted on it, so a no-op update would have passed silently.
    """
    i, s = self.get_table()
    q = query.Query()
    d = {
        'foo': 1,
        'bar': 'value 1',
    }
    pk = i.insert(s, d)
    self.assertGreater(pk, 0)

    d = {
        'foo': 2,
        'bar': 'value 2',
    }
    q.set(d)
    q.is__id(pk)
    row_count = i.update(s, d, q)
    # exactly the one inserted row matches the pk, so exactly one row
    # should report as updated
    self.assertEqual(1, row_count)

    # let's pull it out and make sure it persisted
    q = query.Query()
    q.is__id(pk)
    gd = i.get_one(s, q)
    self.assertEqual(d['foo'], gd['foo'])
    self.assertEqual(d['bar'], gd['bar'])
    self.assertEqual(pk, gd["_id"])
def test_delete(self):
    """delete() should remove matching rows and report how many went away"""
    # deleting against a table that doesn't exist yet shouldn't blow up
    i = self.get_interface()
    s = self.get_schema()
    q = query.Query().is_foo(1)
    r = i.delete(s, q)

    i, s = self.get_table()

    # deleting when nothing matches removes zero rows
    q = query.Query().is_foo(1)
    self.assertEqual(0, i.delete(s, q))

    _ids = self.insert(i, s, 5)

    # remove every row we just inserted
    q = query.Query().in__id(_ids)
    rows = i.get(s, q)
    self.assertEqual(5, len(rows))
    self.assertEqual(5, i.delete(s, q))

    # verify rows are deleted
    self.assertEqual(0, len(i.get(s, q)))

    # make sure the deletion stuck across a fresh connection
    i.close()
    i = self.get_interface()
    self.assertEqual(0, len(i.get(s, q)))
def test__normalize_date_SQL(self):
    """exercise the date kwargs supported by both SQLite and Postgres; if
    another backend is ever added this may need to move out of the general
    generator tests"""
    i = self.get_interface()
    s = Schema(
        self.get_table_name(),
        foo=Field(datetime.datetime, True),
        _id=Field(int, True, pk=True),
        index_foo=Index('foo'),
    )
    i.set_table(s)

    pk20 = i.insert(s, {'foo': datetime.datetime(2014, 4, 20)})
    pk21 = i.insert(s, {'foo': datetime.datetime(2014, 4, 21)})

    # a lone day kwarg matches the April 20th row
    q = query.Query().is_foo(day=20)
    self.assertEqual(i.get_one(s, q)['_id'], pk20)

    # day + month together match the April 21st row
    q = query.Query().is_foo(day=21, month=4)
    self.assertEqual(i.get_one(s, q)['_id'], pk21)

    # a month with no matching rows returns nothing
    q = query.Query().is_foo(day=21, month=3)
    self.assertFalse(i.get_one(s, q))
def test_weak_ref(self):
    """deleting the referenced row should NULL out a weak (non-required)
    foreign key instead of removing the referencing row"""
    i = self.get_interface()
    table_name_1 = "".join(
        random.sample(string.ascii_lowercase, random.randint(5, 15)))
    table_name_2 = "".join(
        random.sample(string.ascii_lowercase, random.randint(5, 15)))

    s_1 = Schema(
        table_name_1,
        _id=Field(int, pk=True),
        foo=Field(int, True)
    )
    s_2 = Schema(
        table_name_2,
        _id=Field(int, pk=True),
        s_pk=Field(s_1, False),  # weak ref: not required
    )
    i.set_table(s_1)
    i.set_table(s_2)

    pk1 = i.insert(s_1, {'foo': 1})
    pk2 = i.insert(s_2, {'s_pk': pk1})

    # the referencing row exists and is visible
    q2 = query.Query().is__id(pk2)
    row = i.get_one(s_2, q2)
    self.assertGreater(len(row), 0)

    # remove the row it points at
    q1 = query.Query().is__id(pk1)
    i.delete(s_1, q1)

    # the referencing row survives but its weak ref is now NULL
    row = i.get_one(s_2, q2)
    self.assertGreater(len(row), 0)
    self.assertIsNone(row['s_pk'])
def test_transaction_nested_fail_1(self):
    """make sure 2 new tables in a wrapped transaction work as expected"""
    i = self.get_interface()
    table_name_1 = self.get_table_name()
    table_name_2 = self.get_table_name()

    s1 = Schema(
        table_name_1,
        _id=Field(int, pk=True),
        foo=Field(int, True)
    )
    s2 = Schema(
        table_name_2,
        _id=Field(int, pk=True),
        bar=Field(int, True),
        s_pk=Field(s1),
    )

    # both inserts happen inside a single wrapping transaction, which also
    # has to create both tables on the fly
    with i.transaction() as connection:
        pk1 = i.insert(s1, {"foo": 1}, connection=connection)
        pk2 = i.insert(s2, {"bar": 2, "s_pk": pk1}, connection=connection)

    # after the commit both rows should be visible and linked
    r1 = i.get_one(s1, query.Query().is__id(pk1))
    self.assertEqual(pk1, r1['_id'])

    r2 = i.get_one(s2, query.Query().is__id(pk2))
    self.assertEqual(pk2, r2['_id'])
    self.assertEqual(pk1, r2['s_pk'])
def test_transaction_context_manager(self):
    """make sure the with transaction() context manager works as expected"""
    i, s = self.get_table()

    # a successful transaction should commit the insert
    _id = None
    with i.transaction() as connection:
        fields = self.get_fields(s)
        _id = i.insert(s, fields, connection=connection)
    self.assertTrue(_id)

    d = i.get_one(s, query.Query().is__id(_id))
    self.assertGreater(len(d), 0)

    # an exception raised inside the block should roll the insert back
    with self.assertRaises(RuntimeError):
        with i.transaction() as connection:
            fields = self.get_fields(s)
            _id = i.insert(s, fields, connection=connection)
            raise RuntimeError("this should fail")

    d = i.get_one(s, query.Query().is__id(_id))
    self.assertEqual(len(d), 0)
def test_transaction_nested_fail_3(self):
    """make sure 2 tables where the first one already exists works, and
    second one has 2 refs

    The original built two table-name strings it never used; get_schema()
    generates its own names, so those dead locals have been removed.
    """
    i = self.get_interface()
    s1 = self.get_schema(foo=Field(int, True))
    i.set_table(s1)

    # s2 holds two separate refs into s1
    s2 = self.get_schema(
        bar=Field(int, True),
        s_pk=Field(s1, True),
        s_pk2=Field(s1, True),
    )

    pk1 = i.insert(s1, {"foo": 1})
    pk2 = i.insert(s1, {"foo": 1})
    pk3 = i.insert(s2, {"bar": 2, "s_pk": pk1, "s_pk2": pk2})

    r1 = i.get_one(s1, query.Query().is__id(pk1))
    self.assertEqual(r1['_id'], pk1)

    # both refs should have round-tripped
    r2 = i.get_one(s2, query.Query().is__id(pk3))
    self.assertEqual(r2['_id'], pk3)
    self.assertEqual(r2['s_pk'], pk1)
    self.assertEqual(r2['s_pk2'], pk2)
def test_dropped_pipe(self):
    """handle a secured pipe like spiped or stunnel restarting while there
    were active connections

    NOTE -- currently this is very specific to our environment, this test
    will most likely always be skipped unless you're testing on our
    Vagrant box
    """
    # TODO -- make this more reproducible outside of our environment
    i, s = self.get_table()
    _id = self.insert(i, s, 1)[0]
    q = query.Query()
    q.is__id(_id)
    d = i.get_one(s, q)
    self.assertGreater(len(d), 0)

    # check_call() raises CalledProcessError on a nonzero exit, so there is
    # never an exit code worth capturing (the original bound it to an
    # unused variable)
    subprocess.check_call(
        "sudo restart spiped-pg-server", shell=True, stdout=stdnull)
    time.sleep(1)

    # the interface should survive the server-side pipe restart
    q = query.Query()
    q.is__id(_id)
    d = i.get_one(s, q)
    self.assertGreater(len(d), 0)

    subprocess.check_call(
        "sudo restart spiped-pg-client", shell=True, stdout=stdnull)
    time.sleep(1)

    # and the client-side pipe restart too
    q = query.Query()
    q.is__id(_id)
    d = i.get_one(s, q)
    self.assertGreater(len(d), 0)
def test_count(self):
    """count() should reflect the number of rows in the table"""
    i, s = self.get_table()

    # an empty table counts zero
    self.assertEqual(0, i.count(s, query.Query()))

    # after inserting rows the count matches
    self.insert(i, s, 5)
    self.assertEqual(5, i.count(s, query.Query()))
def test_get_one(self):
    """get_one() returns a single matching row, or an empty dict on a miss"""
    i, s = self.get_table()
    _ids = self.insert(i, s, 2)

    # every inserted pk is retrievable
    for _id in _ids:
        q = query.Query().is__id(_id)
        row = i.get_one(s, q)
        self.assertEqual(row[s._id.name], _id)

    # a pk that doesn't exist comes back as an empty dict
    q = query.Query().is__id(12334342)
    self.assertEqual({}, i.get_one(s, q))
def test_db_disconnect(self):
    """make sure interface can recover if the db disconnects mid script
    execution"""
    i, s = self.get_table()
    _id = self.insert(i, s, 1)[0]

    # the row is visible before the restart
    d = i.get_one(s, query.Query().is__id(_id))
    self.assertGreater(len(d), 0)

    # bounce the db server out from under the open connection
    InitD("postgresql").restart()

    # the interface should reconnect transparently and still find the row
    d = i.get_one(s, query.Query().is__id(_id))
    self.assertGreater(len(d), 0)
def test_get_sql(self):
    """each query feature should show up in the generated SQL"""
    i = self.get_interface()
    s = self.get_schema()
    q = query.Query()
    q.in__id(range(1, 5))

    # an IN clause on the pk produces one bound arg per value
    sql, sql_args = i.get_SQL(s, q)
    self.assertTrue('_id' in sql)
    self.assertEqual(4, len(sql_args))

    # an extra where condition gets ANDed in with one more bound arg
    q.gt_foo(5)
    sql, sql_args = i.get_SQL(s, q)
    self.assertTrue('foo' in sql)
    self.assertTrue('AND' in sql)
    self.assertEqual(5, len(sql_args))

    # sort directions
    q.asc_foo().desc_bar()
    sql, sql_args = i.get_SQL(s, q)
    for fragment in ('ORDER BY', 'ASC', 'DESC'):
        self.assertTrue(fragment in sql)

    # pagination values are rendered inline
    q.limit(222).offset(111)
    sql, sql_args = i.get_SQL(s, q)
    for fragment in ('LIMIT', 'OFFSET', '222', '111'):
        self.assertTrue(fragment in sql)
def test_table_recovery(self):
    """selecting against a table that doesn't exist yet should quietly
    return no rows instead of erroring"""
    i = self.get_interface()
    s = self.get_schema()
    rows = i.get(s, query.Query())
    self.assertEqual([], rows)
def test_field_timestamp(self):
    """raw numeric timestamps written straight into a datetime column should
    round-trip, including values outside datetime's representable range"""
    table_name = self.get_table_name()
    schema = self.get_schema(
        table_name, ZTIMESTAMP=Field(datetime.datetime))
    q = query.Query()

    epoch = datetime.datetime(1970, 1, 1)
    ts = (datetime.datetime.utcnow() - epoch).total_seconds()

    i = self.create_interface()
    i.set_table(schema)

    def insert_raw(value):
        # write the raw numeric value directly, bypassing field conversion
        i.query(
            "INSERT INTO {} (ZTIMESTAMP) VALUES ({})".format(
                table_name, value),
            ignore_result=True)

    # a normal epoch timestamp keeps 5 decimal places of precision
    insert_raw("{:.5f}".format(ts))
    row = i.get_one(schema, q)
    self.assertEqual(
        (row["ZTIMESTAMP"] - epoch).total_seconds(), round(ts, 5))

    # a timestamp below datetime's range comes back as datetime.min
    insert_raw(-62167219200)
    row = i.get_one(schema, q.offset(1))
    self.assertEqual(row["ZTIMESTAMP"], datetime.datetime.min)

    # a huge positive timestamp still parses to the expected far-future date
    insert_raw(106751991167)
    row = i.get_one(schema, q.offset(2))
    self.assertEqual(
        row["ZTIMESTAMP"], datetime.datetime(5352, 11, 1, 10, 52, 47))
def test__id_insert(self):
    """this fails, so you should be really careful if you set _id and make
    sure you set the auto-increment appropriately"""
    # skipTest() reports this as a skip in the test output; the bare
    # `return` the original used made the disabled test count as a pass
    self.skipTest("fails unless the auto-increment is set appropriately")

    interface, schema = self.get_table()
    start = 5
    stop = 10
    # insert rows with explicit _id values
    for i in xrange(start, stop):
        q = query.Query()
        q.set_fields({'_id': i, 'foo': i, 'bar': 'v{}'.format(i)})
        d = interface.set(schema, q)

    # then let the db assign ids, which collides with the explicit ones
    for i in xrange(0, stop):
        q = query.Query()
        q.set_fields({'foo': stop + 1, 'bar': 'v{}'.format(stop + 1)})
        d = interface.set(schema, q)
def test_set_table(self):
    """set_table() should create the table with its indexes, and every
    supported field type should round-trip through an insert"""
    i = self.get_interface()
    s = self.get_schema()

    # the table doesn't exist until set_table() is called
    self.assertFalse(i.has_table(str(s)))
    r = i.set_table(s)
    self.assertTrue(i.has_table(str(s)))

    # make sure known indexes are there
    indexes = i.get_indexes(s)
    count = 0
    for known_index_name, known_index in s.indexes.items():
        for index_name, index_fields in indexes.items():
            if known_index.fields == index_fields:
                count += 1
    self.assertEqual(len(s.indexes), count)

    # make sure more exotic datatypes are respected
    s_ref = self.get_schema()
    i.set_table(s_ref)
    s_ref_id = self.insert(i, s_ref, 1)[0]

    s = prom.Schema(
        self.get_table_name(),
        _id=Field(int, pk=True),
        one=Field(bool, True),
        two=Field(int, True, size=50),
        three=Field(decimal.Decimal),
        four=Field(float, True, size=10),
        six=Field(
            long,
            True,
        ),
        seven=Field(s_ref, False),
        eight=Field(datetime.datetime),
        nine=Field(datetime.date),
    )
    r = i.set_table(s)

    fields = {
        'one': True,
        'two': 50,
        'three': decimal.Decimal('1.5'),
        'four': 1.987654321,
        'six': 40000,
        'seven': s_ref_id,
        'eight': datetime.datetime(2005, 7, 14, 12, 30),
        'nine': datetime.date(2005, 9, 14),
    }
    pk = i.insert(s, fields)

    # every value should come back exactly as it went in
    q = query.Query().is__id(pk)
    fetched = i.get_one(s, q)
    for k, v in fields.items():
        self.assertEqual(v, fetched[k])
def test_transaction_nested_fail_4(self):
    """ran into a bug where this reared its head and data was lost"""
    i = self.get_interface()

    # these 2 tables exist before the transaction starts
    s1 = self.get_schema(foo=Field(int, True))
    i.set_table(s1)

    s2 = self.get_schema(
        bar=Field(int, True),
        s_pk=Field(s1, True),
        s_pk2=Field(s1, True),
    )
    i.set_table(s2)

    # this is the table that will be created in the transaction
    s3 = self.get_schema(
        che=Field(int, True),
        s_pk=Field(s1, True),
    )

    pk1 = i.insert(s1, {"foo": 1})
    pk12 = i.insert(s1, {"foo": 12})

    self.assertEqual(0, i.count(s2, query.Query()))

    with i.transaction() as connection:
        # create something and put in table 2
        pk2 = i.insert(s2, {
            "bar": 2,
            "s_pk": pk1,
            "s_pk2": pk12
        }, connection=connection)

        # now this should cause the stuff to fail
        # it fails on the select because a new transaction isn't started, so
        # it just discards all the current stuff and adds the table, had
        # this been a mod query (eg, insert) it would not have failed, this
        # is fixed by wrapping selects in a transaction if an active
        # transaction is found
        q3 = query.Query().is_s_pk(pk1)
        rows3 = i.get(s3, q3, connection=connection)

    # the s2 insert must have survived the mid-transaction table creation
    self.assertEqual(1, i.count(s2, query.Query()))
def test_db_disconnect(self):
    """make sure interface can recover if the db disconnects mid script
    execution, SQLite is a bit different than postgres which is why this
    method is completely original"""
    i, s = self.get_table()

    _id = self.insert(i, s, 1)[0]
    row = i.get_one(s, query.Query().is__id(_id))
    self.assertGreater(len(row), 0)

    # yank the connection out from under the interface
    i._connection.close()

    # a fresh insert + select should transparently reconnect
    _id = self.insert(i, s, 1)[0]
    row = i.get_one(s, query.Query().is__id(_id))
    self.assertGreater(len(row), 0)
def test_in_sql(self):
    """an IN query over the inserted pks should return every row"""
    i, s = self.get_table()
    _ids = self.insert(i, s, 5)

    q = query.Query().in__id(_ids)
    rows = list(i.get(s, q))
    self.assertEqual(len(rows), 5)
def test_field_bool(self):
    """There was a bug where SQLite boolean field always returned True, this
    tests to make sure that is fixed and it won't happen again"""
    i, s = self.get_table(bar=Field(bool), che=Field(bool))
    pk = i.insert(s, {"bar": False, "che": True})

    row = dict(i.get_one(s, query.Query().is__id(pk)))
    # both truthiness states must round-trip through the db
    self.assertFalse(row["bar"])
    self.assertTrue(row["che"])
def test_null_values(self):
    """filtering on None should translate to IS NULL / IS NOT NULL"""
    i = self.get_interface()
    s = Schema(
        self.get_table_name(),
        _id=Field(int, pk=True),
        foo=Field(int, False),
        bar=Field(int, False),
    )

    # add one with non NULL foo
    pk1 = i.insert(s, {"bar": 1, "foo": 2})
    # and one with NULL foo
    pk2 = i.insert(s, {"bar": 1})

    # is_foo(None) should match only the NULL row
    row = i.get_one(s, query.Query().is_bar(1).is_foo(None))
    self.assertEqual(pk2, row['_id'])

    # not_foo(None) should match only the populated row
    row = i.get_one(s, query.Query().is_bar(1).not_foo(None))
    self.assertEqual(pk1, row['_id'])
def test_query_modified_table(self):
    """querying on a field added to the schema after the table was created
    should still succeed"""
    i = self.get_interface()
    # use a generated table name like the rest of the suite; the original
    # hard-coded 'test_table', which could collide between test runs
    s = prom.Schema(self.get_table_name(), one=Field(int, True))
    i.set_table(s)

    # Add new column
    s.set_field("two", Field(int, False))
    q = query.Query()
    q.is_two(None)

    # Test if query succeeds
    i.get_one(s, q)
def test_transaction_context(self):
    """a failure in the outer transaction should roll back writes made in a
    nested transaction as well"""
    i = self.get_interface()
    table_name_1 = "{}_1".format(self.get_table_name())
    table_name_2 = "{}_2".format(self.get_table_name())

    # these 2 tables exist before the transaction starts
    s1 = Schema(
        table_name_1,
        _id=Field(int, pk=True),
        foo=Field(int, True)
    )
    i.set_table(s1)

    s2 = Schema(
        table_name_2,
        _id=Field(int, pk=True),
        bar=Field(int, True),
        s_pk=Field(s1, True),
    )
    i.set_table(s2)

    pk1 = 0
    pk2 = 0

    # catch only the error we deliberately raise; the original's broad
    # `except Exception: pass` would also have silently swallowed real
    # failures in the inserts themselves
    try:
        with i.transaction() as connection:
            pk1 = i.insert(s1, {"foo": 1}, connection=connection)
            with i.transaction(connection):
                pk2 = i.set(s2, {
                    "bar": 2,
                    "s_pk": pk1,
                }, connection=connection)
            raise RuntimeError("testing")
    except RuntimeError:
        pass

    # nothing written inside the failed transaction should be visible
    self.assertEqual(0, i.count(s1, query.Query().is__id(pk1)))
    self.assertEqual(0, i.count(s2, query.Query().is__id(pk2)))
def test_transaction_nested_fail_2(self):
    """make sure 2 tables where the first one already exists works in a
    nested transaction"""
    i = self.get_interface()
    s1 = self.get_schema(foo=Field(int, True))
    i.set_table(s1)

    s2 = self.get_schema(
        bar=Field(int, True),
        s_pk=Field(s1, True),
    )

    with i.transaction() as connection:
        pk1 = i.insert(s1, {"foo": 1}, connection=connection)
        pk2 = i.insert(s2, {"bar": 2, "s_pk": pk1}, connection=connection)

    r1 = i.get_one(s1, query.Query().is__id(pk1))
    self.assertEqual(pk1, r1['_id'])

    # BUG FIX: the original queried s2 with pk1 (s1's pk) instead of pk2;
    # it only passed because both freshly created tables start their ids
    # at the same value
    r2 = i.get_one(s2, query.Query().is__id(pk2))
    self.assertEqual(pk2, r2['_id'])
    self.assertEqual(r2['s_pk'], pk1)
def test_get_one_offset(self):
    """make sure get_one() works as expected when an offset is set"""
    i, s = self.get_table()

    q = query.Query()
    q.set({
        'foo': 1,
        'bar': 'v1',
    })
    pk = i.insert(s, q.fields_set.fields)

    q = query.Query()
    q.set({
        'foo': 2,
        'bar': 'v2',
    })
    pk2 = i.insert(s, q.fields_set.fields)

    # offset 1 on a descending sort skips the newest row
    q = query.Query().desc__id().offset(1)
    self.assertEqual(i.get_one(s, q)['_id'], pk)

    # just make sure to get expected result if no offset
    q = query.Query().desc__id()
    self.assertEqual(i.get_one(s, q)['_id'], pk2)

    # offsetting past the last row returns an empty dict
    q = query.Query().desc__id().offset(2)
    self.assertEqual({}, i.get_one(s, q))

    # an explicit limit shouldn't change which single row comes back
    q = query.Query().desc__id().offset(1).limit(5)
    self.assertEqual(i.get_one(s, q)['_id'], pk)

    # page(2) behaves like an offset of one row here
    q = query.Query().desc__id().page(2)
    self.assertEqual(i.get_one(s, q)['_id'], pk)

    # page(2) with limit 5 starts past the available rows
    q = query.Query().desc__id().page(2).limit(5)
    self.assertEqual({}, i.get_one(s, q))
def test_no_connection(self):
    """this will make sure prom handles it gracefully if there is no
    connection available ever"""
    postgresql = InitD("postgresql")
    postgresql.ignore_failure = False
    postgresql.stop()

    try:
        i = self.create_interface()
        s = self.get_schema()
        # with the server down every query should surface an InterfaceError
        with self.assertRaises(prom.InterfaceError):
            i.get(s, query.Query())
    finally:
        # always bring the server back up for the rest of the suite
        postgresql.start()
def test_concurrent_error_recovery(self):
    """when recovering from an error in a green thread environment one
    thread could have added the table while the other thread was asleep,
    this will test to make sure two threads failing at the same time will
    both recover correctly"""
    i = self.get_interface()
    s = self.get_schema()

    # both greenlets hit the missing table at (roughly) the same time
    for x in range(1, 3):
        gevent.spawn(i.insert, s, {'foo': x, 'bar': str(x)})
    gevent.wait()

    # both inserts should have recovered and landed
    rows = list(i.get(s, query.Query()))
    self.assertEqual(2, len(rows))
def test_group_field_name(self):
    """a column named 'group' (an SQL keyword) should still round-trip"""
    i = self.get_interface()
    s = Schema(
        self.get_table_name(),
        _id=Field(int, True, pk=True),
        group=Field(str, True),
    )
    i.set_table(s)

    text = testdata.get_words()
    pk = i.insert(s, {'group': text})

    row = dict(i.get_one(s, query.Query().is__id(pk)))
    self.assertEqual(text, row["group"])
    self.assertEqual(pk, row["_id"])
def test_get(self):
    """get() should return all matching rows and honor limit()"""
    i, s = self.get_table()
    _ids = self.insert(i, s, 5)

    q = query.Query()
    q.in__id(_ids)

    # without a limit every inserted row comes back
    rows = i.get(s, q)
    self.assertEqual(len(_ids), len(rows))
    for row in rows:
        self.assertTrue(row[s._id.name] in _ids)

    # with a limit only that many come back, all from our inserted set
    q.limit(2)
    rows = i.get(s, q)
    self.assertEqual(2, len(rows))
    for row in rows:
        self.assertTrue(row[s._id.name] in _ids)
def test__set_all_fields(self):
    """_set_all_fields() should raise while the schema has a required field
    the table lacks, and succeed once that field is made optional"""
    i, s = self.get_table()
    s.set_field("che", Field(str, True))

    # NOTE(review): this query is built but never passed to anything below;
    # it looks vestigial -- confirm with the author before removing
    q = query.Query()
    q.set({
        'foo': 1,
        'bar': 'v1',
        'che': "this field will cause the query to fail",
    })

    # a required field missing from the table should raise
    with self.assertRaises(ValueError):
        i._set_all_fields(s)

    # once che is optional the sync succeeds
    s = self.get_schema(table_name=str(s))
    s.che = str, False
    self.assertTrue(i._set_all_fields(s))