Example #1
    def test_put(self):
        services = [
            {'name':'attstore1','module':'ion.services.coi.attributestore','class':'AttributeStoreService','spawnargs':{'servicename':'as1'}},
            {'name':'attstore2','module':'ion.services.coi.attributestore','class':'AttributeStoreService','spawnargs':{'servicename':'as2'}},
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1','value1')
        logging.info('Result1 put: '+str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: '+str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1','value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        resx1 = yield asc2.get('key1')
        self.assertEqual(resx1, None)
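The assertions above capture the store contract this example relies on: a put stores a value, a second put on the same key overwrites it, and a get on an unknown key resolves to None. Below is a minimal, self-contained sketch of that contract against a plain in-memory dict, written in the same Twisted inlineCallbacks style; InMemoryAttributeStore and exercise are illustrative stand-ins, not part of ION.

from twisted.internet import defer


class InMemoryAttributeStore(object):
    """Hypothetical stand-in mirroring the put/get contract the test asserts."""

    def __init__(self):
        self._data = {}

    def put(self, key, value):
        # The real AttributeStoreClient resolves over ION messaging; here we
        # just store locally and return an already-fired deferred.
        self._data[key] = value
        return defer.succeed('OK')

    def get(self, key):
        # Missing keys resolve to None, matching the assertions above.
        return defer.succeed(self._data.get(key))


@defer.inlineCallbacks
def exercise(store):
    yield store.put('key1', 'value1')
    res = yield store.get('key1')
    assert res == 'value1'

    yield store.put('key1', 'value2')   # a second put overwrites the value
    res = yield store.get('key1')
    assert res == 'value2'

    missing = yield store.get('non_existing')
    assert missing is None


# Every deferred above is already fired, so this completes synchronously
# without starting a reactor.
exercise(InMemoryAttributeStore())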
Example #2
    def test_put_seperate_backend(self):
        # Test with separate store backends
        services = [
            {
                'name': 'attstore1',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as1',
                    'backend_class': 'ion.data.store.Store',
                    'backend_args': {}
                }
            },
            {
                'name': 'attstore2',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as2',
                    'backend_class': 'ion.data.store.Store',
                    'backend_args': {}
                }
            },
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1', 'value1')
        logging.info('Result1 put: ' + str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: ' + str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1', 'value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        # With separate backends this should return None
        resx1 = yield asc2.get('key1')
        self.assertEqual(resx1, None)

        yield asc1.clear_store()
        yield asc2.clear_store()
Example #3
    def test_put_seperate_backend(self):
        # Test with separate store backends
        services = [
            {'name':'attstore1',
            'module':'ion.services.coi.attributestore',
            'class':'AttributeStoreService',
            'spawnargs':{
                'servicename':'as1',
                'backend_class':'ion.data.store.Store',
                'backend_args':{}
                    }
                },
            {'name':'attstore2',
            'module':'ion.services.coi.attributestore',
            'class':'AttributeStoreService',
            'spawnargs':{
                'servicename':'as2',
                'backend_class':'ion.data.store.Store',
                'backend_args':{}
                    }
                },
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1','value1')
        logging.info('Result1 put: '+str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: '+str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1','value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        # With separate backends this should return None
        resx1 = yield asc2.get('key1')
        self.assertEqual(resx1, None)
        
        yield asc1.clear_store()
        yield asc2.clear_store()
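In both variants of this test the backend is named by a dotted path ('backend_class') plus a kwargs dict ('backend_args') in the spawn args. The helper below sketches how such spawn args could be resolved into a backend instance with importlib; it is illustrative only and may not match how AttributeStoreService actually constructs its backend.

import importlib


def load_backend(backend_class, backend_args=None):
    """Resolve a dotted 'package.module.Class' path and instantiate it.

    Illustrative helper; the real service may wire its backend differently.
    """
    module_path, _, class_name = backend_class.rpartition('.')
    module = importlib.import_module(module_path)
    cls = getattr(module, class_name)
    return cls(**(backend_args or {}))


# Mirrors the spawnargs above (only works if the ion package is installed):
# backend = load_backend('ion.data.store.Store', {})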
Example #4
class AppAgent(Process):
    """
    Application Agent - lives on the opunit, communicates status with the app controller, and receives
    instructions.
    """

    def __init__(self, receiver=None, spawnargs=None, **kwargs):
        """
        Constructor.
        Gathers information about the system this agent is running on.
        """
        Process.__init__(self, receiver=receiver, spawnargs=spawnargs, **kwargs)

        if not isinstance(self.spawn_args, dict):
            self.spawn_args = {}

        if not self.spawn_args.has_key("agent_args") or (self.spawn_args.has_key("agent_args") and not isinstance(self.spawn_args["agent_args"], dict)):
            self.spawn_args["agent_args"] = {}

        self._opunit_id = self.spawn_args["agent_args"].get("opunit_id", str(uuid.uuid4())[:8]) # if one didn't get assigned, make one up to report in to the app controller

        self.metrics = { 'cores' : self._get_cores() }
        self.sqlstreams = {}
        self._fsm_factory_class = kwargs.pop("fsm_factory", SSFSMFactory)

        # for time metrics
        self._timer = time.time()

    @defer.inlineCallbacks
    def plc_init(self):
        self.target = self.get_scoped_name('system', "app_controller")

        self.attribute_store_client = AttributeStoreClient()

        # take note of time
        self._timer = time.time()

        # check spawn args for sqlstreams, start them up as appropriate
        if self.spawn_args.has_key('agent_args') and self.spawn_args['agent_args'].has_key('sqlstreams'):
            sqlstreams = self.spawn_args['agent_args']['sqlstreams']

            for ssinfo in sqlstreams:
                ssid = ssinfo['ssid']
                inp_queue = ssinfo['sqlt_vars']['inp_queue']
                defs = yield self._get_sql_defs(uconf=ssinfo['sqlt_vars'])

                self.start_sqlstream(ssid, inp_queue, defs)

        # let the controller know we're starting (and possibly have some sqlstreams starting);
        # we use callLater so it can transition out of its init state first
        reactor.callLater(0, self.opunit_status)


    @defer.inlineCallbacks
    def plc_terminate(self):
        """
        Termination of this App Agent process.
        Attempts to shut down sqlstream clients and sqlstream daemon instances cleanly. If
        they exceed a timeout, they are shut down forcefully.
        """
        yield self.kill_sqlstream_clients()  # kill clients first, they prevent sqlstream daemons from shutting down
        yield self.kill_sqlstreams()
        yield self.opunit_status(BasicStates.S_TERMINATED)

    def kill_sqlstreams(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.has_key('_serverproc') and sinfo['_serverproc'] != None:
                dl.append(self.kill_sqlstream(sinfo['ssid']))

        deflist = defer.DeferredList(dl)
        return deflist

    def kill_sqlstream(self, ssid):
        """
        Shuts down and deletes a single SQLstream instance.
        @return A deferred which will be called back when the steps to stop and delete a SQLstream
                instance are complete.
        """
        return self.sqlstreams[ssid]['_fsm'].run_to_state(SSStates.S_INIT)

    def kill_sqlstream_clients(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.has_key('_taskchain') and sinfo['_taskchain'] != None:
                dl.append(sinfo['_taskchain'].close())

        deflist = defer.DeferredList(dl)
        return deflist

    def _get_sql_pumps_on(self):
        sql_cmd = """
                  ALTER PUMP "SignalsPump" START;
                  ALTER PUMP "DetectionsPump" START;
                  ALTER PUMP "DetectionMessagesPump" START;
                  """
        return sql_cmd

    def _get_sql_pumps_off(self):
        sql_cmd = """
                  ALTER PUMP "DetectionMessagesPump" STOP;
                  ALTER PUMP "DetectionsPump" STOP;
                  ALTER PUMP "SignalsPump" STOP;
                  """
        return sql_cmd

    @defer.inlineCallbacks
    def _get_sql_defs(self, sqldefs=None, uconf={}, **kwargs):
        """
        Returns a fully substituted SQLStream SQL definition string.
        Using keyword arguments, you can update the default params passed in to spawn args.
        """
        assert self.spawn_args.has_key('agent_args') and self.spawn_args['agent_args'].has_key('sqlt_vars'), "Required SQL substitution vars have not been set yet or no 'agent_args' key present in spawnargs to this AppAgent."

        spawn_conf = self.spawn_args['agent_args']['sqlt_vars'].copy()

        # get connection details to broker
        cnfgsrc = self.container.exchange_manager.exchange_space.message_space.connection

        conf = { 'server_host'     : cnfgsrc.hostname,
                 'server_port'     : cnfgsrc.port,
                 'server_user'     : cnfgsrc.userid,
                 'server_password' : cnfgsrc.password,
                 'server_vhost'    : cnfgsrc.virtual_host }

        conf.update(spawn_conf)     # update basic connection info with items from spawn_args to this AppAgent
        conf.update(uconf)          # update config with arguments passed in via uconf param
        conf.update(kwargs)         # update config with any additional keyword args

        defs = sqldefs
        if defs == None:
            # no defs passed here, pull from attribute store
            defs = yield self.attribute_store_client.get(SQLTDEFS_KEY)

        assert defs != None and len(defs) > 0, "No definitions found!"

        template = string.Template(defs)

        defer.returnValue(template.substitute(conf))

    def _get_cores(self):
        """
        Gets the number of processors/cores on the current system.
        Adapted from http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
        """
        if NO_MULTIPROCESSING:
            if hasattr(os, "sysconf"):
                if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
                    # linux + unix
                    ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
                    if isinstance(ncpus, int) and ncpus > 0:
                        return ncpus
                else:
                    # osx
                    return int(os.popen2("sysctl -n hw.ncpu")[1].read())

            return 1
        else:
            return multiprocessing.cpu_count()

    def _get_opunit_status(self, cur_state=None):
        """
        Builds this Agent's status.
        @param cur_state    The state that should be reported. Expected to be one of the
                            BasicLifecycleObject states. If left blank, the current
                            process' state is used. This param is useful when reporting
                            state from a state-transition method, such as plc_terminate,
                            where the new state has not taken effect yet but should
                            still be reported.
        @returns            A dict containing status.
        """
        status = { 'id'          : self._opunit_id,
                   'proc_id'     : self.id.full,
                   'metrics'     : self.metrics,
                   'state'       : cur_state or self._StateObject__fsm.current_state }

        # filter out any private vars to the Agent inside of the sqlstream dict
        # "private" vars start with _ in the key name
        sqlstreams = {}
        for ssid,sinfo in self.sqlstreams.items():
            sqlstreams[ssid] = {}
            if sinfo.has_key('_fsm'):
                sqlstreams[ssid]['state'] = sinfo['_fsm'].current_state
            else:
                sqlstreams[ssid]['state'] = "?"
            for k,v in sinfo.items():
                if k[0:1] == "_":
                    continue
                sqlstreams[ssid][k] = v

        status['sqlstreams'] = sqlstreams

        return status

    def op_get_opunit_status(self, content, headers, msg):
        """
        Handles a request from the AppController to give it status of this AppAgent.
        """
        status = self._get_opunit_status()
        self.reply_ok(msg, status, {})

    def opunit_status(self, cur_state=None):
        """
        Sends the current status of this Agent/Op Unit.
        """
        content = self._get_opunit_status(cur_state)
        return self.rpc_send(self.target, 'opunit_status', content)

    @defer.inlineCallbacks
    def op_start_sqlstream(self, content, headers, msg):
        """
        Begins the process of starting and configuring a SQLStream instance on this op unit.
        The app controller calls here when it determines the op unit should spawn a new
        processing SQLStream.
        """
        log.info("op_start_sqlstream") # : %s" % str(self.sqlstreams[ssid]))

        ssid        = content['ssid']
        sqlt_vars   = content['sqlt_vars']
        defs        = yield self._get_sql_defs(uconf=sqlt_vars)
        failed      = False
        ex          = None

        try:
            self.start_sqlstream(ssid, sqlt_vars['inp_queue'], defs)
        except ValueError,e:
            failed = True
            ex = e

        if failed:
            resp = { 'response':'failed',
                     'exception':ex }
        else:
            resp = { 'response':'ok' }

        yield self.reply_ok(msg, resp, {})
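_get_sql_defs above layers three configuration sources (spawn args, then the uconf dict, then keyword overrides) on top of the broker connection details and feeds the merged dict to string.Template.substitute. The short, self-contained demo below shows that layering and the resulting precedence; the SQL text is invented, but the inp_queue and server_* placeholder names mirror keys built in the agent code.

import string

# Toy template; the statement itself is made up for the demo.
defs = ('CREATE FOREIGN STREAM "$inp_queue" '
        "OPTIONS (HOST '$server_host', PORT '$server_port');")

# Lowest priority: defaults derived from the broker connection.
conf = {'server_host': 'localhost', 'server_port': 5672, 'inp_queue': 'default_q'}

spawn_conf = {'inp_queue': 'signals_q'}       # from spawn_args['agent_args']['sqlt_vars']
uconf = {'server_host': 'amqp.example.org'}   # per-call overrides (uconf=...)

conf.update(spawn_conf)   # later updates win, so uconf beats the spawn args
conf.update(uconf)        # and both beat the connection defaults

print(string.Template(defs).substitute(conf))
# CREATE FOREIGN STREAM "signals_q" OPTIONS (HOST 'amqp.example.org', PORT '5672');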
Example #5
class AppAgent(Process):
    """
    Application Agent - lives on the opunit, communicates status with the app controller, and receives
    instructions.
    """
    def __init__(self, receiver=None, spawnargs=None, **kwargs):
        """
        Constructor.
        Gathers information about the system this agent is running on.
        """
        Process.__init__(self,
                         receiver=receiver,
                         spawnargs=spawnargs,
                         **kwargs)

        if not isinstance(self.spawn_args, dict):
            self.spawn_args = {}

        if not self.spawn_args.has_key("agent_args") or (
                self.spawn_args.has_key("agent_args")
                and not isinstance(self.spawn_args["agent_args"], dict)):
            self.spawn_args["agent_args"] = {}

        self._opunit_id = self.spawn_args["agent_args"].get(
            "opunit_id",
            str(uuid.uuid4())[:8]
        )  # if one didn't get assigned, make one up to report in to the app controller

        self.metrics = {'cores': self._get_cores()}
        self.sqlstreams = {}
        self._fsm_factory_class = kwargs.pop("fsm_factory", SSFSMFactory)

        # for time metrics
        self._timer = time.time()

    @defer.inlineCallbacks
    def plc_init(self):
        self.target = self.get_scoped_name('system', "app_controller")

        self.attribute_store_client = AttributeStoreClient()

        # take note of time
        self._timer = time.time()

        # check spawn args for sqlstreams, start them up as appropriate
        if self.spawn_args.has_key('agent_args') and self.spawn_args[
                'agent_args'].has_key('sqlstreams'):
            sqlstreams = self.spawn_args['agent_args']['sqlstreams']

            for ssinfo in sqlstreams:
                ssid = ssinfo['ssid']
                inp_queue = ssinfo['sqlt_vars']['inp_queue']
                defs = yield self._get_sql_defs(uconf=ssinfo['sqlt_vars'])

                self.start_sqlstream(ssid, inp_queue, defs)

        # let the controller know we're starting (and possibly have some sqlstreams starting);
        # we use callLater so it can transition out of its init state first
        reactor.callLater(0, self.opunit_status)

    @defer.inlineCallbacks
    def plc_terminate(self):
        """
        Termination of this App Agent process.
        Attempts to shut down sqlstream clients and sqlstream daemon instances cleanly. If
        they exceed a timeout, they are shut down forcefully.
        """
        # kill clients first, they prevent sqlstream daemons from shutting down
        yield self.kill_sqlstream_clients()
        yield self.kill_sqlstreams()
        yield self.opunit_status(BasicStates.S_TERMINATED)

    def kill_sqlstreams(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.has_key('_serverproc') and sinfo['_serverproc'] != None:
                dl.append(self.kill_sqlstream(sinfo['ssid']))

        deflist = defer.DeferredList(dl)
        return deflist

    def kill_sqlstream(self, ssid):
        """
        Shuts down and deletes a single SQLstream instance.
        @return A deferred which will be called back when the steps to stop and delete a SQLstream
                instance are complete.
        """
        return self.sqlstreams[ssid]['_fsm'].run_to_state(SSStates.S_INIT)

    def kill_sqlstream_clients(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.has_key('_taskchain') and sinfo['_taskchain'] != None:
                dl.append(sinfo['_taskchain'].close())

        deflist = defer.DeferredList(dl)
        return deflist

    def _get_sql_pumps_on(self):
        sql_cmd = """
                  ALTER PUMP "SignalsPump" START;
                  ALTER PUMP "DetectionsPump" START;
                  ALTER PUMP "DetectionMessagesPump" START;
                  """
        return sql_cmd

    def _get_sql_pumps_off(self):
        sql_cmd = """
                  ALTER PUMP "DetectionMessagesPump" STOP;
                  ALTER PUMP "DetectionsPump" STOP;
                  ALTER PUMP "SignalsPump" STOP;
                  """
        return sql_cmd

    @defer.inlineCallbacks
    def _get_sql_defs(self, sqldefs=None, uconf={}, **kwargs):
        """
        Returns a fully substituted SQLStream SQL definition string.
        Using keyword arguments, you can update the default params passed in to spawn args.
        """
        assert self.spawn_args.has_key(
            'agent_args'
        ) and self.spawn_args['agent_args'].has_key(
            'sqlt_vars'
        ), "Required SQL substitution vars have not been set yet or no 'agent_args' key present in spawnargs to this AppAgent."

        spawn_conf = self.spawn_args['agent_args']['sqlt_vars'].copy()

        # get connection details to broker
        cnfgsrc = self.container.exchange_manager.exchange_space.message_space.connection

        conf = {
            'server_host': cnfgsrc.hostname,
            'server_port': cnfgsrc.port,
            'server_user': cnfgsrc.userid,
            'server_password': cnfgsrc.password,
            'server_vhost': cnfgsrc.virtual_host
        }

        # update basic connection info with items from spawn_args to this AppAgent
        conf.update(spawn_conf)
        # update config with arguments passed in via the uconf param
        conf.update(uconf)
        # update config with any additional keyword args
        conf.update(kwargs)

        defs = sqldefs
        if defs == None:
            # no defs passed here, pull from attribute store
            defs = yield self.attribute_store_client.get(SQLTDEFS_KEY)

        assert defs != None and len(defs) > 0, "No definitions found!"

        template = string.Template(defs)

        defer.returnValue(template.substitute(conf))

    def _get_cores(self):
        """
        Gets the number of processors/cores on the current system.
        Adapted from http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
        """
        if NO_MULTIPROCESSING:
            if hasattr(os, "sysconf"):
                if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
                    # linux + unix
                    ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
                    if isinstance(ncpus, int) and ncpus > 0:
                        return ncpus
                else:
                    # osx
                    return int(os.popen2("sysctl -n hw.ncpu")[1].read())

            return 1
        else:
            return multiprocessing.cpu_count()

    def _get_opunit_status(self, cur_state=None):
        """
        Builds this Agent's status.
        @param cur_state    The state that should be reported. Expected to be one of the
                            BasicLifecycleObject states. If left blank, the current
                            process' state is used. This param is useful when reporting
                            state from a state-transition method, such as plc_terminate,
                            where the new state has not taken effect yet but should
                            still be reported.
        @returns            A dict containing status.
        """
        status = {
            'id': self._opunit_id,
            'proc_id': self.id.full,
            'metrics': self.metrics,
            'state': cur_state or self._StateObject__fsm.current_state
        }

        # filter out any private vars to the Agent inside of the sqlstream dict
        # "private" vars start with _ in the key name
        sqlstreams = {}
        for ssid, sinfo in self.sqlstreams.items():
            sqlstreams[ssid] = {}
            if sinfo.has_key('_fsm'):
                sqlstreams[ssid]['state'] = sinfo['_fsm'].current_state
            else:
                sqlstreams[ssid]['state'] = "?"
            for k, v in sinfo.items():
                if k[0:1] == "_":
                    continue
                sqlstreams[ssid][k] = v

        status['sqlstreams'] = sqlstreams

        return status

    def op_get_opunit_status(self, content, headers, msg):
        """
        Handles a request from the AppController to give it status of this AppAgent.
        """
        status = self._get_opunit_status()
        self.reply_ok(msg, status, {})

    def opunit_status(self, cur_state=None):
        """
        Sends the current status of this Agent/Op Unit.
        """
        content = self._get_opunit_status(cur_state)
        return self.rpc_send(self.target, 'opunit_status', content)

    @defer.inlineCallbacks
    def op_start_sqlstream(self, content, headers, msg):
        """
        Begins the process of starting and configuring a SQLStream instance on this op unit.
        The app controller calls here when it determines the op unit should spawn a new
        processing SQLStream.
        """
        log.info("op_start_sqlstream")  # : %s" % str(self.sqlstreams[ssid]))

        ssid = content['ssid']
        sqlt_vars = content['sqlt_vars']
        defs = yield self._get_sql_defs(uconf=sqlt_vars)
        failed = False
        ex = None

        try:
            self.start_sqlstream(ssid, sqlt_vars['inp_queue'], defs)
        except ValueError, e:
            failed = True
            ex = e

        if failed:
            resp = {'response': 'failed', 'exception': ex}
        else:
            resp = {'response': 'ok'}

        yield self.reply_ok(msg, resp, {})
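kill_sqlstreams and kill_sqlstream_clients above both fan out one deferred per SQLstream instance and hand the batch to defer.DeferredList, which fires once every member has fired, delivering a list of (success, result) pairs. A minimal illustration of that aggregation pattern, with the per-instance shutdown deferreds faked:

from twisted.internet import defer


def fake_shutdown(ssid):
    # Stand-in for kill_sqlstream(ssid); already fired for the demo.
    return defer.succeed('stopped %s' % ssid)


def report(results):
    # DeferredList delivers one (success, result) pair per deferred,
    # in the order the deferreds were added.
    for ok, value in results:
        print('%s -> %s' % (ok, value))


batch = [fake_shutdown(ssid) for ssid in ('ss1', 'ss2', 'ss3')]
defer.DeferredList(batch).addCallback(report)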
Example #6
    def test_put_common_backend(self):
        # Test with cassandra store backend where both services can access common values!
        services = [
            {'name':'Junk1',
             'module':'ion.services.coi.attributestore',
             'class':'AttributeStoreService',
             'spawnargs':{'servicename':'as1', # this is the name of the instance!
                            'backend_class':'ion.data.backends.cassandra.CassandraStore',
                            'backend_args':{'cass_host_list':['amoeba.ucsd.edu:9160'],
                                        'keyspace':'Datastore',
                                        'colfamily':'DS1',
                                        'cf_super':True,
                                        'namespace':'ours',
                                        'key':'Junk'
                                        }}},
            {'name':'Junk2',
            'module':'ion.services.coi.attributestore',
            'class':'AttributeStoreService',
            'spawnargs':{'servicename':'as2', # this is the name of the instance!
                        'backend_class':'ion.data.backends.cassandra.CassandraStore',
                        'backend_args':{'cass_host_list':['amoeba.ucsd.edu:9160'],
                                        'keyspace':'Datastore',
                                        'colfamily':'DS1',
                                        'cf_super':True,
                                        'namespace':'ours',
                                        'key':'Junk'
                                        }}}
                    ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1','value1')
        logging.info('Result1 put: '+str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: '+str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1','value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        tres1 = yield asc2.put('tkey1','tvalue1')
        logging.info('tResult1 put: '+str(tres1))

        tres2 = yield asc2.get('tkey1')
        logging.info('tResult2 get: '+str(tres2))
        self.assertEqual(tres2, 'tvalue1')

        # Let Cassandra register the new entry; asleep returns a deferred,
        # so it must be yielded for the pause to actually take effect
        yield pu.asleep(5)

        # With common backends the value should be found.
        resx1 = yield asc2.get('key1')
        self.assertEqual(resx1, 'value2',msg='Failed to pull value from second service instance')
        
        yield asc1.clear_store()
        yield asc2.clear_store()
Example #7
    def test_put_common_backend(self):
        # Test with cassandra store backend where both services can access common values!
        services = [
            {
                'name': 'Junk1',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as1',  # this is the name of the instance!
                    'backend_class':
                    'ion.data.backends.cassandra.CassandraStore',
                    'backend_args': {
                        'cass_host_list': ['amoeba.ucsd.edu:9160'],
                        'keyspace': 'Datastore',
                        'colfamily': 'DS1',
                        'cf_super': True,
                        'namespace': 'ours',
                        'key': 'Junk'
                    }
                }
            },
            {
                'name': 'Junk2',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as2',  # this is the name of the instance!
                    'backend_class':
                    'ion.data.backends.cassandra.CassandraStore',
                    'backend_args': {
                        'cass_host_list': ['amoeba.ucsd.edu:9160'],
                        'keyspace': 'Datastore',
                        'colfamily': 'DS1',
                        'cf_super': True,
                        'namespace': 'ours',
                        'key': 'Junk'
                    }
                }
            }
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1', 'value1')
        logging.info('Result1 put: ' + str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: ' + str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1', 'value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        tres1 = yield asc2.put('tkey1', 'tvalue1')
        logging.info('tResult1 put: ' + str(tres1))

        tres2 = yield asc2.get('tkey1')
        logging.info('tResult2 get: ' + str(tres2))
        self.assertEqual(tres2, 'tvalue1')

        # Let Cassandra register the new entry; asleep returns a deferred,
        # so it must be yielded for the pause to actually take effect
        yield pu.asleep(5)

        # With common backends the value should be found.
        resx1 = yield asc2.get('key1')
        self.assertEqual(
            resx1,
            'value2',
            msg='Failed to pull value from second service instance')

        yield asc1.clear_store()
        yield asc2.clear_store()
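The fixed pu.asleep(5) pause in this test gives Cassandra time to make the write from the first service instance visible to the second. A hedged alternative is to poll until the value shows up or a deadline passes, rather than sleeping a fixed interval; wait_for_value below is an illustrative helper, not part of ION.

from twisted.internet import defer, reactor, task


@defer.inlineCallbacks
def wait_for_value(client, key, expected, timeout=10.0, interval=0.5):
    """Poll client.get(key) until it equals expected or timeout elapses."""
    waited = 0.0
    while waited < timeout:
        value = yield client.get(key)
        if value == expected:
            defer.returnValue(value)
        # task.deferLater returns a deferred that fires after interval seconds.
        yield task.deferLater(reactor, interval, lambda: None)
        waited += interval
    raise AssertionError('%r never became %r within %s seconds' % (key, expected, timeout))


# Usage inside the test, replacing the fixed sleep:
#     yield wait_for_value(asc2, 'key1', 'value2')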