def test_all_size_tiered_options(self):
    """Declare every SizeTiered compaction knob on a model and verify the
    values round-trip through the server's table options."""
    class AllSizeTieredOptionsModel(Model):
        __compaction__ = SizeTieredCompactionStrategy
        __compaction_bucket_low__ = .3
        __compaction_bucket_high__ = 2
        __compaction_min_threshold__ = 2
        __compaction_max_threshold__ = 64
        __compaction_tombstone_compaction_interval__ = 86400

        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllSizeTieredOptionsModel)
    sync_table(AllSizeTieredOptionsModel)

    # Compaction sub-options come back as a JSON-encoded string.
    raw = get_table_settings(AllSizeTieredOptionsModel).options['compaction_strategy_options']
    parsed = json.loads(raw)
    self.assertDictEqual(parsed, {
        u'min_threshold': u'2',
        u'bucket_low': u'0.3',
        u'tombstone_compaction_interval': u'86400',
        u'bucket_high': u'2',
        u'max_threshold': u'64',
    })
def setUpClass(cls):
    """Create a fresh TestQueryModel table for the date-query tests.

    Skips the whole class on protocol < 4: the original silently
    ``return``-ed here, which left the fixtures unsynced while the tests
    still executed against them (the sibling setUpClass in this file
    raises SkipTest for the same condition).
    """
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest(
            "Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
    super(TestQuerying, cls).setUpClass()
    drop_table(TestQueryModel)
    sync_table(TestQueryModel)
def drop_tables():
    """Drop every table in the keyspace.

    Note: use with care — this destroys all stored data!
    """
    for model in (ShoppingList, User):
        drop_table(model)
def main():
    # Attach the cqlengine mapper to the cluster before anything else runs.
    connection.setup(['10.244.35.35'], "test", 1)
    print("set up a connection to cassandra cluster")

    # Keep-alive heartbeat toward the master node.
    echo = echo_alive("10.244.35.35", "10.244.35.35")
    echo.start()

    while True:
        # Recreate both tables from scratch for each worker cycle.
        drop_table(Map_0)
        print("droping map table if exists.")
        drop_table(Reduce_0)
        print("droping reduce table if exists.")
        sync_table(Map_0)
        print("creating map table.")
        sync_table(Reduce_0)
        print("creating reduce table.")

        print("start a worker task")
        wk = worker("10.244.35.35", 54321, "10.244.35.35", 12345)
        wk.listen()
        wk.echo_hello()
        wk.recvfile()

        # Run the map/reduce pass and stream results back to the master.
        word_counts = wk.processing()
        print("word count to be send: ", word_counts)
        for tup in word_counts:
            wk.sendtomaster(tup)
        end_msg = ("###done###", 0)  # sentinel telling the master we finished
        wk.sendtomaster(end_msg)
        print("all sent, end of this worker task")
def test_model_over_write(self):
    """
    Test to ensure overwriting of primary keys in model inheritance is allowed

    This is currently only an issue in PyPy. When PYTHON-504 is introduced this
    should be updated to error out and warn the user

    @since 3.6.0
    @jira_ticket PYTHON-576
    @expected_result primary keys can be overwritten via inheritance

    @test_category object_mapper
    """
    class TimeModelBase(Model):
        uuid = columns.TimeUUID(primary_key=True)

    class DerivedTimeModel(TimeModelBase):
        __table_name__ = 'derived_time'
        uuid = columns.TimeUUID(primary_key=True, partition_key=True)
        value = columns.Text(required=False)

    # In case the table already exists in keyspace
    drop_table(DerivedTimeModel)
    sync_table(DerivedTimeModel)

    first_key = uuid1()
    second_key = uuid1()
    DerivedTimeModel.create(uuid=first_key, value="first")
    DerivedTimeModel.create(uuid=second_key, value="second")
    DerivedTimeModel.objects.filter(uuid=first_key)
def test_extra_field(self):
    """A column added server-side, outside the model, must not break reads."""
    drop_table(self.TestModel)
    sync_table(self.TestModel)
    self.TestModel.create()
    # Alter the live table behind the mapper's back.
    execute("ALTER TABLE {0} add blah int".format(
        self.TestModel.column_family_name(include_keyspace=True)))
    # Row reads should tolerate the column unknown to the model.
    self.TestModel.objects().all()
def recreate_db():
    """Wipe the 'documents' keyspace entirely (!) and rebuild the schema."""
    drop_keyspace('documents')
    drop_table(Documents)
    create_keyspace_simple('documents', replication_factor=1)
    sync_table(Documents)
def test_static_columns():
    """sync_table must emit the static column on create and never re-ALTER it."""
    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()

    # First sync creates the table; the DDL must mark "name" as static.
    with mock.patch.object(session, "execute", wraps=session.execute) as create_spy:
        sync_table(StaticModel)
    assert create_spy.call_count > 0
    ddl = create_spy.call_args[0][0].query_string
    assert '"name" text static' in ddl, ddl

    # if we sync again, we should not apply an alter w/ a static
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as resync_spy:
        sync_table(StaticModel)
    assert len(resync_spy.call_args_list) == 1
    assert "ALTER" not in resync_spy.call_args[0][0].query_string
def test_static_columns(self):
    """sync_table should create the static column once and never ALTER it."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest(
            "Native protocol 2+ required, currently using: {0}".format(
                PROTOCOL_VERSION))

    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()

    # The CREATE statement must declare "name" as a static column.
    with mock.patch.object(session, "execute", wraps=session.execute) as create_spy:
        sync_table(StaticModel)
    self.assertGreater(create_spy.call_count, 0)
    self.assertIn('"name" text static', create_spy.call_args[0][0].query_string)

    # if we sync again, we should not apply an alter w/ a static
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as resync_spy:
        sync_table(StaticModel)
    self.assertEqual(len(resync_spy.call_args_list), 0)
def test_table_definition(self):
    """ Tests that creating a table with capitalized column names succeeds """
    models = (LowercaseKeyModel, CapitalizedKeyModel)
    for model in models:
        sync_table(model)
    for model in models:
        drop_table(model)
def setUpClass(cls):
    """Skip on old protocols, then rebuild the query-test table."""
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest(
            "Date query tests require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
    super(TestQuerying, cls).setUpClass()
    # Start every run from a freshly created table.
    drop_table(TestQueryModel)
    sync_table(TestQueryModel)
def test_non_quality_filtering():
    """A non-equality (>) filter on a clustering key works with ALLOW FILTERING."""
    class NonEqualityFilteringModel(Model):
        example_id = columns.UUID(primary_key=True, default=uuid.uuid4)
        # sequence_id is a clustering key
        sequence_id = columns.Integer(primary_key=True)
        example_type = columns.Integer(index=True)
        created_at = columns.DateTime()

    drop_table(NonEqualityFilteringModel)
    sync_table(NonEqualityFilteringModel)

    # setup table, etc.
    for seq, kind in ((1, 0), (3, 0), (5, 1)):
        NonEqualityFilteringModel.create(
            sequence_id=seq, example_type=kind, created_at=datetime.now())

    matches = NonEqualityFilteringModel.objects(
        NonEqualityFilteringModel.sequence_id > 3).allow_filtering()
    num = matches.count()
    assert num == 1, num
def test_collection_with_default(self):
    """
    Test the updates work as expected when an object is deleted

    @since 3.9
    @jira_ticket PYTHON-657
    @expected_result the non updated column is None and the updated column
    has the set value

    @test_category object_mapper
    """
    sync_table(ModelWithDefaultCollection)

    def _stored(**filters):
        # Fetch the matching row back from the database as a plain dict.
        return ModelWithDefaultCollection.objects().all().get(**filters)._as_dict()

    item = ModelWithDefaultCollection.create(id=1, mf={1: 1}, dummy=1).save()
    self.assertEqual(_stored(), {'id': 1, 'dummy': 1, 'mf': {1: 1}})
    item.update(mf={2: 2})
    self.assertEqual(_stored(), {'id': 1, 'dummy': 1, 'mf': {2: 2}})
    # Setting a collection column to None resets it to an empty collection.
    item.update(mf=None)
    self.assertEqual(_stored(), {'id': 1, 'dummy': 1, 'mf': {}})

    item = ModelWithDefaultCollection.create(id=2, dummy=2).save()
    self.assertEqual(_stored(id=2), {'id': 2, 'dummy': 2, 'mf': {2: 2}})
    item.update(mf={1: 1, 4: 4})
    self.assertEqual(_stored(id=2), {'id': 2, 'dummy': 2, 'mf': {1: 1, 4: 4}})

    drop_table(ModelWithDefaultCollection)
def test_keywords_as_names(self):
    """CQL reserved words used as keyspace/table/column names must be quoted."""
    create_keyspace_simple('keyspace', 1)

    class table(Model):
        __keyspace__ = 'keyspace'
        select = columns.Integer(primary_key=True)
        table = columns.Text()

    # create should work
    drop_table(table)
    sync_table(table)

    written = table.create(select=0, table='table')
    fetched = table.objects(select=0)[0]
    for attr in ('select', 'table'):
        self.assertEqual(getattr(written, attr), getattr(fetched, attr))

    # alter should work
    class table(Model):
        __keyspace__ = 'keyspace'
        select = columns.Integer(primary_key=True)
        table = columns.Text()
        where = columns.Text()

    sync_table(table)

    written = table.create(select=1, table='table')
    fetched = table.objects(select=1)[0]
    for attr in ('select', 'table', 'where'):
        self.assertEqual(getattr(written, attr), getattr(fetched, attr))

    drop_keyspace('keyspace')
def test_all_size_tiered_options(self):
    """Every SizeTiered compaction setting set on the model must be
    reflected in the synced table's compaction options."""
    class AllSizeTieredOptionsModel(Model):
        __compaction__ = SizeTieredCompactionStrategy
        __compaction_bucket_low__ = .3
        __compaction_bucket_high__ = 2
        __compaction_min_threshold__ = 2
        __compaction_max_threshold__ = 64
        __compaction_tombstone_compaction_interval__ = 86400

        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllSizeTieredOptionsModel)
    sync_table(AllSizeTieredOptionsModel)

    expected = {
        u'min_threshold': u'2',
        u'bucket_low': u'0.3',
        u'tombstone_compaction_interval': u'86400',
        u'bucket_high': u'2',
        u'max_threshold': u'64',
    }
    # Options are stored server-side as a JSON string.
    encoded = get_table_settings(AllSizeTieredOptionsModel).options['compaction_strategy_options']
    self.assertDictEqual(json.loads(encoded), expected)
def test_static_columns(self):
    """The static column is created once; a re-sync issues no statements."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest("Native protocol 2+ required, currently using: {0}".format(PROTOCOL_VERSION))

    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()

    with mock.patch.object(session, "execute", wraps=session.execute) as spy:
        sync_table(StaticModel)
    self.assertGreater(spy.call_count, 0)
    statement = spy.call_args[0][0].query_string
    self.assertIn('"name" text static', statement)

    # if we sync again, we should not apply an alter w/ a static
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as second_spy:
        sync_table(StaticModel)
    self.assertEqual(len(second_spy.call_args_list), 0)
def connect(self):
    """Open a cluster session, rebuild the User table, and seed one row.

    Side effects: stores the session on ``self.session``, drops and
    re-creates the User table, and inserts a single seeded user.
    """
    cluster = Cluster()
    self.session = cluster.connect()
    connection.setup(["127.0.0.1"], "dataanalysis")
    drop_table(User)
    sync_table(User)
    # Seed row; the return value was unused, so don't bind it.
    User.create(username="******", created_at=datetime.now())
    logging.info("Connected to cluster.")
    # Was a Python-2 `print` statement with a typo ("Sucessfully");
    # print() keeps the module importable on Python 3 as well.
    print("Successfully connected.")
def delete_model(self, model):
    """Drop the Cassandra column family backing the given model."""
    db_management.drop_table(get_column_family(self.connection, model))
def test_alter_actually_alters(self):
    """Changing __options__ on an existing table must issue an ALTER."""
    model = copy.deepcopy(LeveledCompactionTestTable)
    drop_table(model)
    sync_table(model)

    # Flip the compaction strategy, then re-sync.
    model.__options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'}}
    sync_table(model)

    meta = _get_table_metadata(model)
    self.assertRegexpMatches(meta.export_as_string(), '.*SizeTieredCompactionStrategy.*')
def _recreate_keyspace():
    """Drop every application table, then rebuild the schema."""
    logger.info("Dropping tables")
    for model in (Channel, Performer, Song, PlayByChannel, PlayBySong):
        drop_table(model)
    _sync_database()
def test_alter_actually_alters(self):
    """Switching compaction strategy on an existing table must take effect."""
    model = copy.deepcopy(LeveledcompactionTestTable)
    drop_table(model)
    sync_table(model)

    # Change strategy and clear the leveled-only option before re-syncing.
    model.__compaction__ = SizeTieredCompactionStrategy
    model.__compaction_sstable_size_in_mb__ = None
    sync_table(model)

    settings = get_table_settings(model)
    self.assertRegexpMatches(settings.options['compaction_strategy_class'],
                             '.*SizeTieredCompactionStrategy$')
def tearDownClass(cls):
    """Drop the test table/keyspace and restore the default connection."""
    super(ContextQueryConnectionTests, cls).tearDownClass()

    with ContextQuery(TestModel, connection='cluster') as tm:
        drop_table(tm)
    drop_keyspace('ks1', connections=['cluster'])

    # reset the default connection
    conn.unregister_connection('fake_cluster')
    conn.unregister_connection('cluster')
    setup_connection(DEFAULT_KEYSPACE)
def test_keywords_as_names(self):
    """
    Test for CQL keywords as names

    test_keywords_as_names tests that CQL keywords are properly and
    automatically quoted in cqlengine. It creates a keyspace, keyspace,
    which should be automatically quoted to "keyspace" in CQL. It then
    creates a table, table, which should also be automatically quoted to
    "table". It then verifies that operations can be done on the
    "keyspace"."table" which has been created. It also verifies that
    table alterations work and operations can be performed on the
    altered table.

    @since 2.6.0
    @jira_ticket PYTHON-244
    @expected_result Cqlengine should quote CQL keywords properly when
    creating keyspaces and tables.

    @test_category schema:generation
    """
    # If the keyspace exists, it will not be re-created
    create_keyspace_simple('keyspace', 1)

    class table(Model):
        __keyspace__ = 'keyspace'
        select = columns.Integer(primary_key=True)
        table = columns.Text()

    # In case the table already exists in keyspace
    drop_table(table)

    # Create should work
    sync_table(table)
    written = table.create(select=0, table='table')
    fetched = table.objects(select=0)[0]
    self.assertEqual(written.select, fetched.select)
    self.assertEqual(written.table, fetched.table)

    # Alter should work
    class table(Model):
        __keyspace__ = 'keyspace'
        select = columns.Integer(primary_key=True)
        table = columns.Text()
        where = columns.Text()

    sync_table(table)
    written = table.create(select=1, table='table')
    fetched = table.objects(select=1)[0]
    self.assertEqual(written.select, fetched.select)
    self.assertEqual(written.table, fetched.table)
    self.assertEqual(written.where, fetched.where)

    drop_keyspace('keyspace')
def test_alter_actually_alters(self):
    """Re-syncing after a strategy change must update the live table."""
    tmp = copy.deepcopy(LeveledcompactionTestTable)
    drop_table(tmp)
    sync_table(tmp)

    tmp.__compaction__ = SizeTieredCompactionStrategy
    tmp.__compaction_sstable_size_in_mb__ = None  # leveled-only knob
    sync_table(tmp)

    strategy = get_table_settings(tmp).options['compaction_strategy_class']
    self.assertRegexpMatches(strategy, '.*SizeTieredCompactionStrategy$')
def test_alter_options(self):
    """Re-syncing with a changed sstable size must not raise."""
    class AlterTable(Model):
        __compaction__ = LeveledCompactionStrategy
        __compaction_sstable_size_in_mb__ = 64

        user_id = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AlterTable)
    sync_table(AlterTable)

    # Bump the option and sync again; this should ALTER, not fail.
    AlterTable.__compaction_sstable_size_in_mb__ = 128
    sync_table(AlterTable)
def test_compaction_not_altered_without_changes_leveled(self):
    """A re-sync with identical leveled-compaction options reports no change."""
    class LeveledCompactionChangesDetectionTest(Model):
        __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                      'sstable_size_in_mb': '160',
                                      'tombstone_threshold': '0.125',
                                      'tombstone_compaction_interval': '3600'}}
        pk = columns.Integer(primary_key=True)

    drop_table(LeveledCompactionChangesDetectionTest)
    sync_table(LeveledCompactionChangesDetectionTest)

    self.assertFalse(_update_options(LeveledCompactionChangesDetectionTest))
def setUpClass(cls):
    """Build the query-test tables and load the shared fixture rows."""
    super(BaseQuerySetUsage, cls).setUpClass()
    drop_table(TestModel)
    drop_table(IndexedTestModel)
    sync_table(TestModel)
    sync_table(IndexedTestModel)
    sync_table(TestMultiClusteringModel)

    # (test_id, attempt_id, description, expected_result, test_result)
    plain_rows = [
        (0, 0, 'try1', 5, 30), (0, 1, 'try2', 10, 30),
        (0, 2, 'try3', 15, 30), (0, 3, 'try4', 20, 25),
        (1, 0, 'try5', 5, 25), (1, 1, 'try6', 10, 25),
        (1, 2, 'try7', 15, 25), (1, 3, 'try8', 20, 20),
        (2, 0, 'try9', 50, 40), (2, 1, 'try10', 60, 40),
        (2, 2, 'try11', 70, 45), (2, 3, 'try12', 75, 45),
    ]
    for test_id, attempt_id, description, expected, result in plain_rows:
        TestModel.objects.create(test_id=test_id, attempt_id=attempt_id,
                                 description=description,
                                 expected_result=expected, test_result=result)

    # Same rows again but with unique partition keys for index tests.
    indexed_rows = [
        (0, 0, 'try1', 5, 30), (1, 1, 'try2', 10, 30),
        (2, 2, 'try3', 15, 30), (3, 3, 'try4', 20, 25),
        (4, 0, 'try5', 5, 25), (5, 1, 'try6', 10, 25),
        (6, 2, 'try7', 15, 25), (7, 3, 'try8', 20, 20),
        (8, 0, 'try9', 50, 40), (9, 1, 'try10', 60, 40),
        (10, 2, 'try11', 70, 45), (11, 3, 'try12', 75, 45),
    ]
    for test_id, attempt_id, description, expected, result in indexed_rows:
        IndexedTestModel.objects.create(test_id=test_id, attempt_id=attempt_id,
                                        description=description,
                                        expected_result=expected, test_result=result)

    # Rows carrying collection columns for list/set/map index tests.
    IndexedTestModel.objects.create(test_id=12, attempt_id=3, description='list12',
                                    expected_result=75, test_result=45,
                                    test_list=[1, 2, 42], test_set=set([1, 2, 3]),
                                    test_map={'1': 1, '2': 2, '3': 3})
    IndexedTestModel.objects.create(test_id=13, attempt_id=3, description='list13',
                                    expected_result=75, test_result=45,
                                    test_list=[3, 4, 5], test_set=set([4, 5, 42]),
                                    test_map={'1': 5, '2': 6, '3': 7})
    IndexedTestModel.objects.create(test_id=14, attempt_id=3, description='list14',
                                    expected_result=75, test_result=45,
                                    test_list=[1, 2, 3], test_set=set([1, 2, 3]),
                                    test_map={'1': 1, '2': 2, '3': 42})
def test_all_leveled_options(self):
    """Leveled-compaction options given via __options__ survive a sync."""
    class AllLeveledOptionsModel(Model):
        __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                      'sstable_size_in_mb': '64'}}
        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllLeveledOptionsModel)
    sync_table(AllLeveledOptionsModel)

    self._verify_options(_get_table_metadata(AllLeveledOptionsModel),
                         AllLeveledOptionsModel.__options__)
def __init__(self, manager):
    """Wire up the Cassandra backend from CASSANDRABACKEND_* settings.

    Connects to the cluster, optionally creates the keyspace, optionally
    drops all model tables, syncs the tables, and builds the metadata,
    state and queue sub-components.
    """
    self.manager = manager
    settings = manager.settings
    cluster_ips = settings.get('CASSANDRABACKEND_CLUSTER_IPS')
    cluster_port = settings.get('CASSANDRABACKEND_CLUSTER_PORT')
    drop_all_tables = settings.get('CASSANDRABACKEND_DROP_ALL_TABLES')
    keyspace = settings.get('CASSANDRABACKEND_KEYSPACE')
    keyspace_create = settings.get('CASSANDRABACKEND_CREATE_KEYSPACE_IF_NOT_EXISTS')
    models = settings.get('CASSANDRABACKEND_MODELS')
    crawl_id = settings.get('CASSANDRABACKEND_CRAWL_ID')
    generate_stats = settings.get('CASSANDRABACKEND_GENERATE_STATS')

    # Resolve dotted model paths into classes.
    self.models = dict([(name, load_object(klass)) for name, klass in models.items()])

    self.cluster = Cluster(
        contact_points=cluster_ips,
        port=cluster_port,
        compression=True,
        default_retry_policy=RetryPolicy(),
        reconnection_policy=ConstantReconnectionPolicy(10, 100)
    )

    self.session = self.cluster.connect()
    self.session.row_factory = dict_factory
    self.session.encoder.mapping[dict] = self.session.encoder.cql_encode_map_collection
    self.crawl_id = crawl_id
    self.generate_stats = generate_stats

    if keyspace_create:
        query = """CREATE KEYSPACE IF NOT EXISTS \"%s\" WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}""" % (keyspace, )
        self.session.execute(query)
    self.session.set_keyspace(keyspace)

    connection.set_session(self.session)

    if drop_all_tables:
        # NOTE: was dict.iteritems(), which is Python-2-only; items() is
        # equivalent here and works on both interpreters. Only the values
        # are needed in this loop.
        for value in self.models.values():
            drop_table(value)

    for key, value in self.models.items():
        # Skip the stats table unless stats generation is enabled.
        if (self.generate_stats is False and key != 'CrawlStatsModel') or self.generate_stats is True:
            sync_table(value)

    self._metadata = Metadata(self.session, self.models['MetadataModel'],
                              self.crawl_id, self.generate_stats)
    self._states = States(self.session, self.models['StateModel'],
                          settings.get('STATE_CACHE_SIZE_LIMIT'), self.crawl_id)
    self._queue = self._create_queue(settings)
def test_clustering_order_more_complex(self):
    """ Tests that models can be saved and retrieved """
    sync_table(TestClusteringComplexModel)

    # Insert the clustering keys in random order.
    keys = list(range(20))
    random.shuffle(keys)
    for key in keys:
        TestClusteringComplexModel.create(id=1, clustering_key=key, some_value=2)

    stored = list(TestClusteringComplexModel.objects.values_list('some_value', flat=True))
    self.assertEqual([2] * 20, stored)

    drop_table(TestClusteringComplexModel)
def strategy_worker(cls, manager):
    """Build a backend instance configured for the strategy-worker role."""
    backend = cls(manager)
    settings = manager.settings
    crawl_id = settings.get('CASSANDRABACKEND_CRAWL_ID')

    state_model = backend.models['StateModel']
    if settings.get('CASSANDRABACKEND_DROP_ALL_TABLES'):
        drop_table(state_model)
    sync_table(state_model)

    backend._states = States(backend.session, state_model,
                             settings.get('STATE_CACHE_SIZE_LIMIT'), crawl_id)
    return backend
def test_alter_actually_alters(self):
    """Re-syncing after swapping the compaction class must alter the table."""
    tmp = copy.deepcopy(LeveledCompactionTestTable)
    drop_table(tmp)
    sync_table(tmp)

    tmp.__options__ = {
        'compaction': {
            'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy'
        }
    }
    sync_table(tmp)

    exported = _get_table_metadata(tmp).export_as_string()
    self.assertRegexpMatches(exported, '.*SizeTieredCompactionStrategy.*')
def test_compaction_not_altered_without_changes_leveled(self):
    """Identical leveled-compaction settings must not trigger an update."""
    from cassandra.cqlengine.management import update_compaction

    class LeveledCompactionChangesDetectionTest(Model):
        __compaction__ = LeveledCompactionStrategy
        __compaction_sstable_size_in_mb__ = 160
        __compaction_tombstone_threshold__ = 0.125
        __compaction_tombstone_compaction_interval__ = 3600

        pk = columns.Integer(primary_key=True)

    drop_table(LeveledCompactionChangesDetectionTest)
    sync_table(LeveledCompactionChangesDetectionTest)

    # Nothing changed, so no ALTER should be needed.
    assert not update_compaction(LeveledCompactionChangesDetectionTest)
def test_all_leveled_options(self):
    """The sstable size declared on the model shows up in the table options."""
    class AllLeveledOptionsModel(Model):
        __compaction__ = LeveledCompactionStrategy
        __compaction_sstable_size_in_mb__ = 64

        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllLeveledOptionsModel)
    sync_table(AllLeveledOptionsModel)

    # Compaction sub-options come back JSON-encoded.
    encoded = get_table_settings(AllLeveledOptionsModel).options['compaction_strategy_options']
    self.assertDictEqual(json.loads(encoded), {u'sstable_size_in_mb': u'64'})
def test_alter_options(self):
    """Changing sstable_size_in_mb in __options__ must ALTER the live table."""
    class AlterTable(Model):
        __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                                      'sstable_size_in_mb': '64'}}
        user_id = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AlterTable)
    sync_table(AlterTable)
    self.assertRegexpMatches(_get_table_metadata(AlterTable).export_as_string(),
                             ".*'sstable_size_in_mb': '64'.*")

    # Bump the option in place and re-sync; the metadata must follow.
    AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128'
    sync_table(AlterTable)
    self.assertRegexpMatches(_get_table_metadata(AlterTable).export_as_string(),
                             ".*'sstable_size_in_mb': '128'.*")
def test_compaction_not_altered_without_changes_sizetiered(self):
    """A re-sync with identical SizeTiered options reports no change."""
    class SizeTieredCompactionChangesDetectionTest(Model):
        __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                                      'bucket_high': '20',
                                      'bucket_low': '10',
                                      'max_threshold': '200',
                                      'min_threshold': '100',
                                      'min_sstable_size': '1000',
                                      'tombstone_threshold': '0.125',
                                      'tombstone_compaction_interval': '3600'}}
        pk = columns.Integer(primary_key=True)

    drop_table(SizeTieredCompactionChangesDetectionTest)
    sync_table(SizeTieredCompactionChangesDetectionTest)

    self.assertFalse(_update_options(SizeTieredCompactionChangesDetectionTest))
def test_all_size_tiered_options(self):
    """All SizeTiered options given via __options__ survive a sync."""
    class AllSizeTieredOptionsModel(Model):
        __options__ = {'compaction': {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                                      'bucket_low': '.3',
                                      'bucket_high': '2',
                                      'min_threshold': '2',
                                      'max_threshold': '64',
                                      'tombstone_compaction_interval': '86400'}}
        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllSizeTieredOptionsModel)
    sync_table(AllSizeTieredOptionsModel)

    self._verify_options(_get_table_metadata(AllSizeTieredOptionsModel),
                         AllSizeTieredOptionsModel.__options__)
def test_concrete_class_table_creation_cycle(self):
    """ Tests that models with inherited abstract classes can be created, and have io performed """
    from cassandra.cqlengine.management import sync_table, drop_table
    sync_table(ConcreteModelWithCol)

    written = [ConcreteModelWithCol.create(pkey=5, data=6),
               ConcreteModelWithCol.create(pkey=6, data=7)]
    # Each written row must read back with identical key and payload.
    for expected in written:
        fetched = ConcreteModelWithCol.get(pkey=expected.pkey)
        assert expected.pkey == fetched.pkey
        assert expected.data == fetched.data

    drop_table(ConcreteModelWithCol)
def test_all_leveled_options(self):
    """Leveled-compaction __options__ must be verifiable after sync."""
    class AllLeveledOptionsModel(Model):
        __options__ = {
            'compaction': {
                'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                'sstable_size_in_mb': '64',
            }
        }
        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllLeveledOptionsModel)
    sync_table(AllLeveledOptionsModel)

    table_meta = _get_table_metadata(AllLeveledOptionsModel)
    self._verify_options(table_meta, AllLeveledOptionsModel.__options__)
def test_batch_execute_on_exception_succeeds(self):
    # makes sure if execute_on_exception == True we still apply the batch
    drop_table(BatchQueryLogModel)
    sync_table(BatchQueryLogModel)

    obj = BatchQueryLogModel.objects(k=1)
    self.assertEqual(0, len(obj))

    try:
        with BatchQuery(execute_on_exception=True) as b:
            BatchQueryLogModel.batch(b).create(k=1, v=1)
            raise Exception("Blah")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; the deliberately raised Exception is still caught.
        pass

    obj = BatchQueryLogModel.objects(k=1)

    # should be 1 because the batch should execute
    self.assertEqual(1, len(obj))
def test_clustering_order_more_complex(self):
    """ Tests that models can be saved and retrieved """
    sync_table(TestClusteringComplexModel)

    # Create rows with clustering keys shuffled out of order.
    items = list(range(20))
    random.shuffle(items)
    for i in items:
        TestClusteringComplexModel.create(id=1, clustering_key=i, some_value=2)

    # Every stored row carries some_value == 2, regardless of insert order.
    fetched = TestClusteringComplexModel.objects.values_list('some_value', flat=True)
    self.assertEqual([2] * 20, list(fetched))

    drop_table(TestClusteringComplexModel)
def test_compaction_not_altered_without_changes_leveled(self):
    """Syncing unchanged leveled-compaction options must be a no-op."""
    class LeveledCompactionChangesDetectionTest(Model):
        __options__ = {
            'compaction': {
                'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy',
                'sstable_size_in_mb': '160',
                'tombstone_threshold': '0.125',
                'tombstone_compaction_interval': '3600',
            }
        }
        pk = columns.Integer(primary_key=True)

    drop_table(LeveledCompactionChangesDetectionTest)
    sync_table(LeveledCompactionChangesDetectionTest)

    changed = _update_options(LeveledCompactionChangesDetectionTest)
    self.assertFalse(changed)
def test_batch_execute_on_exception_skips_if_not_specified(self):
    # makes sure that WITHOUT execute_on_exception the batch is NOT applied
    # when an exception escapes the block (the previous comment was copied
    # from the `== True` test and described the opposite behavior)
    drop_table(BatchQueryLogModel)
    sync_table(BatchQueryLogModel)

    obj = BatchQueryLogModel.objects(k=2)
    self.assertEqual(0, len(obj))

    try:
        with BatchQuery() as b:
            BatchQueryLogModel.batch(b).create(k=2, v=2)
            raise Exception("Blah")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; the deliberately raised Exception is still caught.
        pass

    obj = BatchQueryLogModel.objects(k=2)

    # should be 0 because the batch should not execute
    self.assertEqual(0, len(obj))
def upload_patient_information(cass_conf):
    """Recreate the Patient table and load rows from the bundled CSV.

    :param cass_conf: mapping providing at least 'host' and 'default_keyspace'.
    """
    connection.setup([cass_conf['host']], cass_conf['default_keyspace'], protocol_version=3)
    drop_table(Patient)   # Drop table if it exist
    sync_table(Patient)   # In this case, it will create the table

    csv_file_name = 'adkb/data/patients.csv'  # The file where the data is read from
    # `with` guarantees the handle is closed; the original leaked it.
    with open(csv_file_name) as csv_file:
        reader = csv.DictReader(csv_file)
        header = reader.fieldnames  # field/column names from the CSV header
        for row in reader:
            # Build a fresh dict per row (the original reused one mutable
            # dict across iterations).
            Patient.create(**dict((field, row[field]) for field in header))
def test_compaction_not_altered_without_changes_sizetiered(self):
    """Identical SizeTiered settings must not trigger a compaction update."""
    from cassandra.cqlengine.management import update_compaction

    class SizeTieredCompactionChangesDetectionTest(Model):
        __compaction__ = SizeTieredCompactionStrategy
        __compaction_bucket_high__ = 20
        __compaction_bucket_low__ = 10
        __compaction_max_threshold__ = 200
        __compaction_min_threshold__ = 100
        __compaction_min_sstable_size__ = 1000
        __compaction_tombstone_threshold__ = 0.125
        __compaction_tombstone_compaction_interval__ = 3600

        pk = columns.Integer(primary_key=True)

    drop_table(SizeTieredCompactionChangesDetectionTest)
    sync_table(SizeTieredCompactionChangesDetectionTest)

    # Re-syncing identical settings must not report a compaction change.
    assert not update_compaction(SizeTieredCompactionChangesDetectionTest)
def test_create_drop_table(self):
    """sync/drop must honor explicit connections and model __connection__."""
    for ks in self.keyspaces:
        create_keyspace_simple(ks, 1, connections=self.conns)

    # No connection (default is fake)
    with self.assertRaises(NoHostAvailable):
        sync_table(TestModel)

    # Explicit connections
    sync_table(TestModel, connections=self.conns)

    # Explicit drop
    drop_table(TestModel, connections=self.conns)

    # Model connection
    TestModel.__connection__ = 'cluster'
    sync_table(TestModel)
    TestModel.__connection__ = None

    # No connection (default is fake)
    with self.assertRaises(NoHostAvailable):
        drop_table(TestModel)

    # Model connection
    TestModel.__connection__ = 'cluster'
    drop_table(TestModel)
    TestModel.__connection__ = None

    # Clean up the keyspaces created above.
    for ks in self.keyspaces:
        drop_keyspace(ks, connections=self.conns)
def test_all_size_tiered_options(self):
    """SizeTiered __options__ must be verifiable on the synced table."""
    class AllSizeTieredOptionsModel(Model):
        __options__ = {
            'compaction': {
                'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                'bucket_low': '.3',
                'bucket_high': '2',
                'min_threshold': '2',
                'max_threshold': '64',
                'tombstone_compaction_interval': '86400',
            }
        }
        cid = columns.UUID(primary_key=True)
        name = columns.Text()

    drop_table(AllSizeTieredOptionsModel)
    sync_table(AllSizeTieredOptionsModel)

    table_meta = _get_table_metadata(AllSizeTieredOptionsModel)
    self._verify_options(table_meta, AllSizeTieredOptionsModel.__options__)
def test_compaction_not_altered_without_changes_sizetiered(self):
    """Re-syncing unchanged SizeTiered options must detect no changes."""
    class SizeTieredCompactionChangesDetectionTest(Model):
        __options__ = {
            'compaction': {
                'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy',
                'bucket_high': '20',
                'bucket_low': '10',
                'max_threshold': '200',
                'min_threshold': '100',
                'min_sstable_size': '1000',
                'tombstone_threshold': '0.125',
                'tombstone_compaction_interval': '3600',
            }
        }
        pk = columns.Integer(primary_key=True)

    drop_table(SizeTieredCompactionChangesDetectionTest)
    sync_table(SizeTieredCompactionChangesDetectionTest)

    changed = _update_options(SizeTieredCompactionChangesDetectionTest)
    self.assertFalse(changed)