def test_put_seperate_backend(self):
    """
    Verify that two AttributeStoreService instances configured with
    separate (per-process, in-memory) ion.data.store.Store backends
    do NOT share key/value data.

    NOTE(review): "seperate" in the method name is a typo, but it is
    kept as-is because renaming would change test discovery/reporting.
    """
    # Test with seperate store backends: two service instances, each
    # with its own ion.data.store.Store (no shared storage).
    services = [
        {'name': 'attstore1',
         'module': 'ion.services.coi.attributestore',
         'class': 'AttributeStoreService',
         'spawnargs': {'servicename': 'as1',
                       'backend_class': 'ion.data.store.Store',
                       'backend_args': {}}},
        {'name': 'attstore2',
         'module': 'ion.services.coi.attributestore',
         'class': 'AttributeStoreService',
         'spawnargs': {'servicename': 'as2',
                       'backend_class': 'ion.data.store.Store',
                       'backend_args': {}}},
    ]
    sup = yield self._spawn_processes(services)

    # Exercise basic put/get/overwrite/miss semantics on the first service.
    asc1 = AttributeStoreClient(proc=sup, targetname='as1')

    res1 = yield asc1.put('key1', 'value1')
    logging.info('Result1 put: ' + str(res1))

    res2 = yield asc1.get('key1')
    logging.info('Result2 get: ' + str(res2))
    self.assertEqual(res2, 'value1')

    # Overwriting an existing key returns the new value on get.
    res3 = yield asc1.put('key1', 'value2')
    res4 = yield asc1.get('key1')
    self.assertEqual(res4, 'value2')

    # A never-written key reads back as None (not an error).
    res5 = yield asc1.get('non_existing')
    self.assertEqual(res5, None)

    asc2 = AttributeStoreClient(proc=sup, targetname='as2')

    # With separate backends this should return none: as2 must not see
    # data written through as1.
    resx1 = yield asc2.get('key1')
    self.assertEqual(resx1, None)

    # Clean up both stores so state doesn't leak into other tests.
    yield asc1.clear_store()
    yield asc2.clear_store()
def slc_init(self):
    # Service life cycle state.
    #
    # Sets up the announcement-queue consumer, the EPU controller and
    # attribute-store clients, then loads the SQL stream definition.
    # NOTE(review): this is a generator (uses yield) — presumably driven
    # by Twisted's inlineCallbacks machinery at the call site; confirm
    # the decorator/caller outside this view.

    # consume the announcement queue
    self.announce_recv = TopicWorkerReceiver(name=ANNOUNCE_QUEUE,
                                             scope='global',
                                             process=self,
                                             handler=self._recv_announce)

    # declares queue and starts listening on it
    yield self.announce_recv.attach()

    # get topic based routing to all sensor data (for anything missed on the announcement queue)
    #self.all_data_recv = TopicWorkerReceiver(name="ta_alldata",
    #                                         scope='global',
    #                                         binding_key = "ta.*.BHZ",
    #                                         process=self,
    #                                         handler=self._recv_data)
    #yield self.all_data_recv.attach()
    #yield self.all_data_recv.initialize()
    #self.counter = 0

    # Clients used later in the service lifecycle; construction here is
    # cheap — no remote calls are made until they are used.
    self.epu_controller_client = EPUControllerClient()
    self.attribute_store_client = AttributeStoreClient()

    # Load the SQL stream definition template before the service is live.
    yield self._load_sql_def()
def plc_init(self):
    """
    Process life cycle init.

    Resolves the app_controller target, creates the attribute-store
    client, records the start time, launches any SQLstreams declared in
    the spawn args, and schedules an initial opunit status report.

    NOTE(review): this is a generator (uses yield) — presumably driven
    by Twisted's inlineCallbacks machinery at the call site; confirm
    the decorator/caller outside this view.
    """
    self.target = self.get_scoped_name('system', "app_controller")
    self.attribute_store_client = AttributeStoreClient()

    # take note of time
    self._timer = time.time()

    # check spawn args for sqlstreams, start them up as appropriate.
    # (dict.has_key() is deprecated and removed in Python 3; .get() with
    # defaults is equivalent here — no sqlstreams means an empty loop.)
    agent_args = self.spawn_args.get('agent_args', {})
    sqlstreams = agent_args.get('sqlstreams', [])

    for ssinfo in sqlstreams:
        ssid = ssinfo['ssid']
        inp_queue = ssinfo['sqlt_vars']['inp_queue']
        # Render the SQL definitions with this stream's template vars.
        defs = yield self._get_sql_defs(uconf=ssinfo['sqlt_vars'])

        self.start_sqlstream(ssid, inp_queue, defs)

    # let controller know we're starting and have some sqlstreams starting, possibly
    # we call later in order to let it transition out of init state
    reactor.callLater(0, self.opunit_status)
def test_put_common_backend(self):
    """
    Verify that two AttributeStoreService instances configured with the
    SAME Cassandra backend (same host/keyspace/column family/namespace)
    see each other's key/value data.

    Requires the Cassandra host at amoeba.ucsd.edu:9160 to be reachable.
    """
    # Test with cassandra store backend where both services can access
    # common values!
    backend_args = {'cass_host_list': ['amoeba.ucsd.edu:9160'],
                    'keyspace': 'Datastore',
                    'colfamily': 'DS1',
                    'cf_super': True,
                    'namespace': 'ours',
                    'key': 'Junk'}
    services = [
        {'name': 'Junk1',
         'module': 'ion.services.coi.attributestore',
         'class': 'AttributeStoreService',
         'spawnargs': {'servicename': 'as1',  # this is the name of the instance!
                       'backend_class': 'ion.data.backends.cassandra.CassandraStore',
                       'backend_args': dict(backend_args)}},
        {'name': 'Junk2',
         'module': 'ion.services.coi.attributestore',
         'class': 'AttributeStoreService',
         'spawnargs': {'servicename': 'as2',  # this is the name of the instance!
                       'backend_class': 'ion.data.backends.cassandra.CassandraStore',
                       'backend_args': dict(backend_args)}},
    ]
    sup = yield self._spawn_processes(services)

    # Basic put/get/overwrite/miss semantics through the first service.
    asc1 = AttributeStoreClient(proc=sup, targetname='as1')

    res1 = yield asc1.put('key1', 'value1')
    logging.info('Result1 put: ' + str(res1))

    res2 = yield asc1.get('key1')
    logging.info('Result2 get: ' + str(res2))
    self.assertEqual(res2, 'value1')

    res3 = yield asc1.put('key1', 'value2')
    res4 = yield asc1.get('key1')
    self.assertEqual(res4, 'value2')

    res5 = yield asc1.get('non_existing')
    self.assertEqual(res5, None)

    asc2 = AttributeStoreClient(proc=sup, targetname='as2')

    tres1 = yield asc2.put('tkey1', 'tvalue1')
    logging.info('tResult1 put: ' + str(tres1))

    tres2 = yield asc2.get('tkey1')
    logging.info('tResult2 get: ' + str(tres2))
    self.assertEqual(tres2, 'tvalue1')

    # Let cassandra register the new entry.
    # BUG FIX: pu.asleep() returns a Deferred; without the yield the
    # test never actually waited, making the cross-service read racy.
    yield pu.asleep(5)

    # With common backends the value should be found.
    resx1 = yield asc2.get('key1')
    self.assertEqual(resx1, 'value2',
                     msg='Failed to pull value from second service instance')

    # Clean up the shared store so state doesn't leak into other tests.
    yield asc1.clear_store()
    yield asc2.clear_store()