def test_get_session_fails_without_existing_connection(self):
    """Requesting the default session without a registered default
    connection must raise CQLEngineException."""
    expected_msg = self.no_registered_connection_msg
    with self.assertRaisesRegexp(connection.CQLEngineException, expected_msg):
        connection.get_session(connection=None)
def get_new_connection(self, connection_settings):
    """Return a CassandraCursor, reusing the live cqlengine session when
    one exists, otherwise establishing a fresh connection first.

    Pops 'contact_points' and 'keyspace' out of *connection_settings* so
    the remainder can be forwarded verbatim to connection.setup().
    """
    points = connection_settings.pop(
        'contact_points', self.default_settings['CONTACT_POINTS'])
    ks = connection_settings.pop(
        'keyspace', self.settings_dict['DEFAULT_KEYSPACE'])
    self.keyspace = ks
    self.session = connection.get_session()
    # Fast path: an open session is already registered -- just wrap it.
    session_alive = self.session is not None and not self.session.is_shutdown
    if session_alive:
        return CassandraCursor(self.session)
    connection.setup(points, ks, **connection_settings)
    self.cluster = connection.get_cluster()
    self.session = connection.get_session()
    return CassandraCursor(self.session)
def setup(self):
    """Initialise the shared cqlengine session exactly once.

    Safe to call repeatedly: when a live session already exists it is
    reused. Otherwise the configured session options are applied to the
    Session class and a new connection is established -- all under the
    instance lock so concurrent callers cannot double-connect.
    """
    with self.lock:
        self.session = connection.get_session()
        already_connected = (self.session is not None
                             and not self.session.is_shutdown)
        if already_connected:
            return
        # Apply configured defaults as class-level Session attributes
        # before connecting, so the new session picks them up.
        for opt_name, opt_value in self.session_options.items():
            setattr(Session, opt_name, opt_value)
        connection.setup(self.hosts, self.keyspace,
                         **self.connection_options)
        self.session = connection.get_session()
        self.cluster = connection.get_cluster()
def insert_concurrent(cls, df):
    '''
    Insert a dataframe using the execute concurrent. This should be faster
    for large dataframes than putting in the rows one at a time.

    Returns the accumulated list of failures reported for each chunk.
    NOTE: Python 2 code (print statements below).
    '''
    ndx = 0  # index of the first row of the next chunk
    failures = []
    # stamp a modification time when the caller didn't supply one
    if 'modified' not in df.columns:
        df['modified'] = datetime.datetime.utcnow()
    # getting unicode instead of str will add a 'u' to the beginning of the col name
    # so coerce to string first
    col_names = [str(name) for name in df.columns.tolist()]
    statement_str = cls.insert_statement(cols=col_names)
    session = get_session()
    # prepare once, reuse the prepared statement for every chunk
    statement = session.prepare(statement_str)
    while ndx < df.shape[0]:
        start = ndx
        end = ndx + cls._concurrent_chunk_size
        print 'inserting', start, 'to', end, '/', df.shape[0]
        this_df = df.iloc[start:end]
        # one parameter list per row, in dataframe column order
        parameters = this_df.values.tolist()
        resp = execute_concurrent_with_args(session, statement,
                                            parameters=parameters,
                                            results_generator=True)
        failures += cls.parse_failures(resp)
        ndx = end
    return failures
def test_static_columns():
    """A static column appears in the initial CREATE statement, and a
    re-sync does not emit an ALTER for it."""
    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()

    with mock.patch.object(session, "execute", wraps=session.execute) as m:
        sync_table(StaticModel)
    assert m.call_count > 0
    create_stmt = m.call_args[0][0].query_string
    assert '"name" text static' in create_stmt, create_stmt

    # if we sync again, we should not apply an alter w/ a static
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as m2:
        sync_table(StaticModel)
    assert len(m2.call_args_list) == 1
    assert "ALTER" not in m2.call_args[0][0].query_string
def create_attached_view(self):
    '''
    This can only be run once the view is attached

    Builds and executes a CREATE MATERIALIZED VIEW statement that mirrors
    the attached model's table, re-keyed so that self.new_clustering is
    the first clustering column (DESC order).
    NOTE: Python 2 code (print statement; relies on dict.keys() being a
    list so that .append() below works).
    '''
    session = get_session()
    # view key order: partition keys, the new clustering column, then the
    # model's original clustering keys
    primary_keys = self._partition_keys.keys()
    primary_keys.append(self.new_clustering)
    primary_keys += self._clustering_keys.keys()
    # every primary-key column of a materialized view must be non-null
    where = 'WHERE '
    for col_name in primary_keys:
        where += '{0} IS NOT NULL AND '.format(col_name)
    # remove the final "AND "
    where = where[:-4]
    # render the key list as ("a", "b", ...) -- repr of a tuple, with the
    # single quotes swapped for CQL's double-quoted identifiers
    primary = primary_keys[:]
    primary = tuple(primary)
    primary = str(primary).replace("'", '"')
    cmd = """CREATE MATERIALIZED VIEW IF NOT EXISTS {ks}."{viewname}" AS SELECT * FROM {ks}."{table}" {where} PRIMARY KEY {primary} WITH CLUSTERING ORDER BY ({new_clustering} DESC);""".format(
        ks=self.active_keyspace(), viewname=self.__table_name__,
        table=self._model_cls.__table_name__, where=where,
        new_clustering=self.new_clustering, primary=primary)
    print cmd
    session.execute(cmd)
def create_keyspace(self):
    """Recreate the target keyspace from scratch (SimpleStrategy, RF=3)."""
    setup_cass(self.seeds, 'system')
    sess = get_session()
    self.session = sess
    set_session(sess)
    # drop first so the create always starts from a clean slate
    drop_keyspace(self.keyspace)
    create_keyspace_simple(name=self.keyspace, replication_factor=3)
    self.logger.debug("ks created")
def test_static_columns(self):
    """The static column is present in the CREATE statement and a second
    sync issues no statements at all."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest(
            "Native protocol 2+ required, currently using: {0}".format(
                PROTOCOL_VERSION))

    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()

    with mock.patch.object(session, "execute", wraps=session.execute) as m:
        sync_table(StaticModel)
    self.assertGreater(m.call_count, 0)
    create_stmt = m.call_args[0][0].query_string
    self.assertIn('"name" text static', create_stmt)

    # a second sync must not emit an ALTER for the static column
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as m2:
        sync_table(StaticModel)
    self.assertEqual(len(m2.call_args_list), 0)
def init():
    """Idempotently initialise the Cassandra connection/schema and the
    Elasticsearch index used by the article store.

    Sets module globals: _cql (cqlengine session), _es (ES client) and
    already_loaded (guard flag). Calling it again is a no-op.
    """
    global already_loaded
    if already_loaded:
        return
    connection.setup(
        ["localhost"], default_keyspace=keyspace, protocol_version=3,
        load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='DC1'),
        retry_connect=True)
    global _cql
    _cql = connection.get_session()
    management.create_keyspace_network_topology(keyspace, {'DC1': 1})
    management.sync_table(Article, keyspaces=[keyspace])
    global _es
    _es = Elasticsearch(["localhost"], scheme="http", port=9200,
                        sniff_on_start=False, sniff_on_connection_fail=True)
    if not _es.indices.exists(index=keyspace):
        print("PUT ES mapping")
        # FIX: close the mapping file; the original leaked the handle
        # from a bare open() inside the json.loads() call.
        with open('article-mapping.json') as mapping_file:
            _es.indices.create(keyspace, json.loads(mapping_file.read()))
    already_loaded = True
def test_static_columns(self):
    """CREATE includes '"name" text static'; re-sync emits nothing."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest("Native protocol 2+ required, currently using: {0}".format(PROTOCOL_VERSION))

    class StaticModel(Model):
        id = columns.Integer(primary_key=True)
        c = columns.Integer(primary_key=True)
        name = columns.Text(static=True)

    drop_table(StaticModel)
    session = get_session()
    patcher = mock.patch.object(session, "execute", wraps=session.execute)
    with patcher as m:
        sync_table(StaticModel)
    self.assertGreater(m.call_count, 0)
    self.assertIn('"name" text static', m.call_args[0][0].query_string)

    # if we sync again, we should not apply an alter w/ a static
    sync_table(StaticModel)
    with mock.patch.object(session, "execute", wraps=session.execute) as m2:
        sync_table(StaticModel)
    self.assertEqual(len(m2.call_args_list), 0)
def get_new_connection(self, connection_settings):
    """Hand back a CassandraCursor, lazily performing connection.setup()
    the first time no cqlengine connection is registered (EAFP via
    CQLEngineException)."""
    points = connection_settings.pop(
        'contact_points', self.default_settings['CONTACT_POINTS'])
    ks = connection_settings.pop(
        'keyspace', self.settings_dict['DEFAULT_KEYSPACE'])
    self.keyspace = ks
    try:
        connection.get_connection()
    except CQLEngineException:
        # no registered connection yet -- establish one now
        connection.setup(points, ks, **connection_settings)
    self.session = connection.get_session()
    self.cluster = connection.get_cluster()
    self.session.default_timeout = None  # Should be in config.
    return CassandraCursor(self.session)
def create_keyspace(self):
    """Create the keyspace (SimpleStrategy, RF=3) without waiting for
    schema agreement."""
    cluster.max_schema_agreement_wait = 0
    setup_cass(self.seeds, 'system')
    sess = get_session()
    self.session = sess
    set_session(sess)
    create_keyspace_simple(name=self.keyspace, replication_factor=3)
    self.logger.debug("ks created")
def test_default_consistency(self):
    """Class-wide default is LOCAL_ONE, while connection.setup pins this
    session's default to ONE."""
    # verify global assumed default
    self.assertEqual(Session.default_consistency_level,
                     ConsistencyLevel.LOCAL_ONE)
    # verify that this session default is set according to connection.setup
    # assumes tests/cqlengine/__init__ setup uses CL.ONE
    active_session = connection.get_session()
    self.assertEqual(active_session.default_consistency_level,
                     ConsistencyLevel.ONE)
def truncate(model, keyspace=None):
    """Truncate *model*'s table, but only inside a keyspace the policy
    layer allows deletions from; otherwise do nothing."""
    target = keyspace or keyspaces.get()
    if not keyspaces.allowed_to_delete_from_keyspace(target):
        return
    session = get_session()
    # now that the keyspace is an acceptable one, we can use it
    session.set_keyspace(target)
    # and truncate the table from the development keyspace
    session.execute('TRUNCATE TABLE "{0}"'.format(model.__table_name__))
def test_default_consistency(self):
    """Class-wide default is LOCAL_ONE; this session's default was set to
    ONE by connection.setup (see tests/cqlengine/__init__)."""
    # verify global assumed default
    self.assertEqual(Session._default_consistency_level,
                     ConsistencyLevel.LOCAL_ONE)
    # verify that this session default is set according to connection.setup
    # assumes tests/cqlengine/__init__ setup uses CL.ONE
    active_session = connection.get_session()
    self.assertEqual(active_session.default_consistency_level,
                     ConsistencyLevel.ONE)
def get_default_ttl(self, table_name):
    """Fetch default_time_to_live for *table_name*, trying the modern
    system_schema tables first and falling back to the pre-3.0 schema
    tables when the server rejects the query."""
    session = get_session()
    modern_query = ("SELECT default_time_to_live FROM system_schema.tables "
                    "WHERE keyspace_name = 'cqlengine_test' AND table_name = '{0}'")
    legacy_query = ("SELECT default_time_to_live FROM system.schema_columnfamilies "
                    "WHERE keyspace_name = 'cqlengine_test' AND columnfamily_name = '{0}'")
    try:
        rows = session.execute(modern_query.format(table_name))
    except InvalidRequest:
        rows = session.execute(legacy_query.format(table_name))
    return rows[0]['default_time_to_live']
def test_ttl_included_on_create(self):
    """ tests that ttls on models work as expected """
    session = get_session()
    with mock.patch.object(session, 'execute') as m:
        TestTTLModel.ttl(60).create(text="hello blake")
    executed_cql = m.call_args[0][0].query_string
    self.assertIn("USING TTL", executed_cql)
def test_update_includes_ttl(self):
    """An instance update with .ttl(60) must emit USING TTL."""
    session = get_session()
    instance = TestTTLModel.create(text="goodbye blake")
    with mock.patch.object(session, 'execute') as m:
        instance.ttl(60).update(text="goodbye forever")
    executed_cql = m.call_args[0][0].query_string
    self.assertIn("USING TTL", executed_cql)
def create_network_keyspace(self):
    """Create the keyspace with NetworkTopologyStrategy (RF=3 in both the
    Data and Analytics DCs), without waiting for schema agreement."""
    cluster.max_schema_agreement_wait = 0
    setup_cass(self.seeds, 'system')
    self.session = get_session()
    set_session(self.session)
    dc_map = {'DC1-Data': 3, 'DC1-Analytics': 3}
    create_keyspace_network_topology(name=self.keyspace,
                                     dc_replication_map=dc_map)
    # FIX: removed the stray create_keyspace_simple() call that followed.
    # It targeted the same keyspace immediately after the network-topology
    # creation -- a no-op at best, and at worst it would replace the
    # NetworkTopologyStrategy replication just configured.
    self.logger.debug("ks network topo created")
def pool_initializer():
    """Worker-pool initializer: build a per-process NexusTileService and
    register a process-local cqlengine connection named after the current
    process, so sub-processes don't share one Cassandra connection."""
    from nexustiles.nexustiles import NexusTileService
    global tile_service
    tile_service = NexusTileService()
    # TODO This is a hack to make sure each sub-process uses it's own connection to cassandra. data-access needs to be updated
    from cassandra.cqlengine import connection
    from multiprocessing import current_process
    # NOTE(review): assumes the Session object exposes a `.hosts` iterable of
    # host objects carrying an `.address` attribute -- confirm this against
    # the driver version in use before relying on it.
    connection.register_connection(
        current_process().name,
        [host.address for host in connection.get_session().hosts])
    connection.set_default_connection(current_process().name)
def test_check_if_test_model2_saved_to_db2_keyspace(self):
    """A TestModel2 row created via the ORM lands in keyspace test_db2."""
    obj_id = 123456
    TestModel2.objects.create(id=obj_id)
    from cassandra.cqlengine.connection import get_session
    sess = get_session()
    sess.set_keyspace("test_db2")
    first_row = sess.execute("SELECT id FROM test_model2")[0]
    self.assertEqual(first_row["id"], obj_id)
def test_check_if_test_model2_saved_to_db2_keyspace(self):
    """Row written through TestModel2 is readable from test_db2."""
    obj_id = 123456
    TestModel2.objects.create(id=obj_id)
    from cassandra.cqlengine.connection import get_session
    sess = get_session()
    sess.set_keyspace('test_db2')
    rows = sess.execute('SELECT id FROM test_model2')
    self.assertEqual(rows[0]['id'], obj_id)
def test_check_if_test_model_saved_to_db_keyspace(self):
    """Row written through TestModel is readable from keyspace test_db."""
    created_at = datetime(2010, 1, 1, 1, 1)
    obj_id = 123456
    TestModel.objects.create(id=obj_id, created_at=created_at)
    from cassandra.cqlengine.connection import get_session
    sess = get_session()
    sess.set_keyspace('test_db')
    row = sess.execute('SELECT id FROM test_model')[0]
    self.assertEqual(row['id'], obj_id)
def __init__(self, seeds, keyspace):
    """Connect to Cassandra at CL.TWO (eager connect, retrying) and cache
    the session and cluster handles on the instance."""
    self.seeds = seeds
    self.keyspace = keyspace
    # TODO for metrics
    # setup_cass(self.seeds, self.keyspace,
    #            consistency=ConsistencyLevel.TWO, lazy_connect=False,
    #            retry_connect=True, metrics_enabled=True)
    setup_cass(self.seeds, self.keyspace,
               consistency=ConsistencyLevel.TWO,
               lazy_connect=False, retry_connect=True)
    sess = get_session()
    self.session = sess
    set_session(sess)
    self.cluster = get_cluster()
def test_delete_on_subclass_does_not_include_disc_value(self):
    """Deleting via a subclass queryset must not place the discriminator
    column in the generated CQL."""
    instance = Inherit1.create()
    session = get_session()
    with mock.patch.object(session, 'execute') as m:
        Inherit1.objects(partition=instance.partition).delete()
    # make sure our discriminator value isn't in the CQL
    # not sure how we would even get here if it was in there
    # since the CQL would fail.
    self.assertNotIn("row_type", m.call_args[0][0].query_string)
def test_check_if_test_model_saved_to_db_keyspace(self):
    """An ORM-created TestModel row is visible in keyspace test_db."""
    created_at = datetime(2010, 1, 1, 1, 1)
    obj_id = 123456
    TestModel.objects.create(id=obj_id, created_at=created_at)
    from cassandra.cqlengine.connection import get_session
    sess = get_session()
    sess.set_keyspace("test_db")
    rows = sess.execute("SELECT id FROM test_model")
    self.assertEqual(rows[0]["id"], obj_id)
def test_ttl_included_with_blind_update(self):
    """A blind queryset update carrying .ttl(60) emits USING TTL."""
    session = get_session()
    obj = TestTTLModel.create(text="whatever")
    tid = obj.id
    with mock.patch.object(session, 'execute') as m:
        TestTTLModel.objects(id=tid).ttl(60).update(text="bacon")
    self.assertIn("USING TTL", m.call_args[0][0].query_string)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self):
    """Deleting via a polymorphic subclass queryset must not place the
    polymorphic key column in the generated CQL."""
    instance = Poly1.create()
    session = get_session()
    with mock.patch.object(session, 'execute') as m:
        Poly1.objects(partition=instance.partition).delete()
    # make sure our polymorphic key isn't in the CQL
    # not sure how we would even get here if it was in there
    # since the CQL would fail.
    self.assertNotIn("row_type", m.call_args[0][0].query_string)
def create_connection(use_gevent: bool = False) -> None:
    """Create a Session object for above Cluster.

    Selects the gevent-based connection class when *use_gevent* is true,
    otherwise the asyncore one, then applies the module-level timeout to
    the resulting session.
    """
    if use_gevent:
        connection_class = GeventConnection
    else:
        connection_class = AsyncoreConnection
    connection.setup(addresses, MEDTAGGER_KEYSPACE, port=port,
                     load_balancing_policy=RoundRobinPolicy(),
                     connection_class=connection_class,
                     connect_timeout=connect_timeout)
    connection.get_session().default_timeout = default_timeout
def test_default_ttl_set(self):
    """Instance _ttl mirrors the model default, and a queryset update
    emits USING TTL."""
    session = get_session()
    obj = TestDefaultTTLModel.create(text="some text on ttl")
    tid = obj.id
    self.assertEqual(obj._ttl, TestDefaultTTLModel.__default_ttl__)
    with mock.patch.object(session, 'execute') as m:
        TestDefaultTTLModel.objects(id=tid).update(text="aligators expired")
    self.assertIn("USING TTL", m.call_args[0][0].query_string)
def test_ttl_is_include_with_query_on_update(self):
    """save() on an instance that carries a ttl emits USING TTL."""
    session = get_session()
    obj = TestTTLModel.create(text="whatever")
    obj.text = "new stuff"
    obj = obj.ttl(60)
    with mock.patch.object(session, 'execute') as m:
        obj.save()
    self.assertIn("USING TTL", m.call_args[0][0].query_string)
def test_check_if_model_saved_to_test_keyspace(self):
    """ExampleModel declares keyspace test_db and its rows land there."""
    created_at = datetime(2010, 1, 1, 1, 1)
    obj_id = 123456
    obj = ExampleModel.objects.create(id=obj_id, created_at=created_at)
    self.assertEqual(obj.__keyspace__, 'test_db')
    from cassandra.cqlengine.connection import get_session
    sess = get_session()
    sess.set_keyspace('test_db')
    row = sess.execute('SELECT id FROM example_model')[0]
    self.assertEqual(row['id'], obj_id)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(
        self):
    """The CQL generated by a subclass delete must not reference the
    polymorphic key column."""
    created = Poly1.create()
    session = get_session()
    with mock.patch.object(session, 'execute') as m:
        Poly1.objects(partition=created.partition).delete()
    # make sure our polymorphic key isn't in the CQL
    # not sure how we would even get here if it was in there
    # since the CQL would fail.
    executed_cql = m.call_args[0][0].query_string
    self.assertNotIn("row_type", executed_cql)
def test_override_default_ttl(self):
    """.ttl(None) on the queryset suppresses USING TTL even after the
    instance ttl was explicitly overridden."""
    session = get_session()
    obj = TestDefaultTTLModel.create(text="some text on ttl")
    tid = obj.id
    obj.ttl(3600)
    self.assertEqual(obj._ttl, 3600)
    with mock.patch.object(session, "execute") as m:
        TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired")
    self.assertNotIn("USING TTL", m.call_args[0][0].query_string)
def test_default_ttl_not_set(self):
    """Without a model default ttl, updates carry no USING TTL clause."""
    session = get_session()
    obj = TestTTLModel.create(text="some text")
    tid = obj.id
    self.assertIsNone(obj._ttl)
    with mock.patch.object(session, 'execute') as m:
        TestTTLModel.objects(id=tid).update(text="aligators")
    self.assertNotIn("USING TTL", m.call_args[0][0].query_string)
def __init__(self, seeds, keyspace):
    """Connect to Cassandra at CL.TWO with metrics enabled, cache the
    session/cluster handles and create the job logger."""
    self.seeds = seeds
    self.keyspace = keyspace
    # TODO configure ConsistencyLevel
    setup_cass(self.seeds, self.keyspace,
               consistency=ConsistencyLevel.TWO,
               lazy_connect=False, retry_connect=True,
               metrics_enabled=True)
    # setup_cass(self.seeds, self.keyspace, consistency=ConsistencyLevel.ONE, lazy_connect=False, retry_connect=True)
    sess = get_session()
    self.session = sess
    set_session(sess)
    self.cluster = get_cluster()
    self.logger = logging.getLogger('pet_race_job')
def execute(self):
    """Queue every instance's DML query into the batch, fire all batched
    statements asynchronously, then block on every future and return
    their results in order."""
    session = get_session()
    for instance in self.instances:
        dml = instance.__dmlquery__(instance.__class__, instance)
        dml.batch(self._batch)
        dml.save()
    futures = []
    for batched_query in self._batch.queries:
        stmt = SimpleStatement(str(batched_query))
        futures.append(
            session.execute_async(stmt, batched_query.get_context()))
    return [future.result() for future in futures]
def test_default_ttl_modify(self):
    """Changing __options__['default_time_to_live'] and re-syncing alters
    the table's default TTL; restores 20 afterwards."""
    session = get_session()
    self.assertEqual(self.get_default_ttl('test_default_ttlmodel'), 20)

    TestDefaultTTLModel.__options__ = {'default_time_to_live': 10}
    sync_table(TestDefaultTTLModel)
    self.assertEqual(self.get_default_ttl('test_default_ttlmodel'), 10)

    # Restore default TTL
    TestDefaultTTLModel.__options__ = {'default_time_to_live': 20}
    sync_table(TestDefaultTTLModel)
def test_paged_result_handling():
    """Slicing a queryset with fetch_size forced to 1 still returns every
    row (regression for #225)."""
    # addresses #225
    class PagingTest(Model):
        id = columns.Integer(primary_key=True)
        val = columns.Integer()

    sync_table(PagingTest)
    PagingTest.create(id=1, val=1)
    PagingTest.create(id=2, val=2)

    session = get_session()
    with mock.patch.object(session, 'default_fetch_size', 1):
        fetched = PagingTest.objects()[:]
    assert len(fetched) == 2
def test_default_ttl_set(self):
    """With a table-level default TTL, the instance _ttl stays None and
    the generated update must not include USING TTL -- Cassandra applies
    the default server-side."""
    session = get_session()
    o = TestDefaultTTLModel.create(text="some text on ttl")
    tid = o.id
    # Should not be set, it's handled by Cassandra
    self.assertIsNone(o._ttl)
    default_ttl = self.get_default_ttl('test_default_ttlmodel')
    self.assertEqual(default_ttl, 20)
    with mock.patch.object(session, 'execute') as m:
        # FIX: the original updated TestTTLModel here (a model with no
        # default TTL), which made the assertion below vacuous. The model
        # under test is TestDefaultTTLModel, whose row was created above.
        TestDefaultTTLModel.objects(id=tid).update(text="aligators expired")
    # Should not be set either
    query = m.call_args[0][0].query_string
    self.assertNotIn("USING TTL", query)
def test_paged_result_handling(self):
    """All rows come back even with fetch_size forced to 1 (#225)."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest("Paging requires native protocol 2+, currently using: {0}".format(PROTOCOL_VERSION))

    # addresses #225
    class PagingTest(Model):
        id = columns.Integer(primary_key=True)
        val = columns.Integer()

    sync_table(PagingTest)
    PagingTest.create(id=1, val=1)
    PagingTest.create(id=2, val=2)

    session = get_session()
    with mock.patch.object(session, 'default_fetch_size', 1):
        rows = PagingTest.objects()[:]
    assert len(rows) == 2
def _connect(self):
    """ settings differ depending on cluster selected """
    try:
        if self.local_env:
            self._local_connect()
        else:
            self._production_connect()
        self.session = connection.get_session()
        return
    except connection.NoHostAvailable:
        # no node reachable -- report which hosts were tried
        return CassandraError.no_host_available(self.hosts)
    except OperationTimedOut as timeout_err:
        return CassandraError.operation_timeout(str(timeout_err))
    except InvalidRequest as bad_request:
        return CassandraError.invalid_request(self.keyspace, str(bad_request))
    except Exception as err:
        # anything else: capture the traceback for the error report
        _, _, tb = exc_info()
        backtrace = extract_tb(tb)
        return CassandraError.unknown_exception(backtrace, str(err))
def test_paged_result_handling(self):
    """Regression test for #225: paging must not truncate results."""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest(
            "Paging requires native protocol 2+, currently using: {0}".
            format(PROTOCOL_VERSION))

    # addresses #225
    class PagingTest(Model):
        id = columns.Integer(primary_key=True)
        val = columns.Integer()

    sync_table(PagingTest)
    for n in (1, 2):
        PagingTest.create(id=n, val=n)

    session = get_session()
    with mock.patch.object(session, 'default_fetch_size', 1):
        results = PagingTest.objects()[:]
    assert len(results) == 2
def session(self):
    """Expose the current cqlengine default session."""
    current_session = connection.get_session()
    return current_session
def create_cursor(self):
    """Ensure a live connection exists, then wrap the current session in
    a CassandraCursor."""
    self.ensure_connection()
    current_session = connection.get_session()
    return CassandraCursor(current_session)