class AppControllerService(ServiceProcess):
    """
    Application controller service that load-balances station data streams
    across SQLStream worker operational units.
    """

    declare = ServiceProcess.service_declare(name="app_controller",
                                             version="0.1.0",
                                             dependencies=["attributestore"])

    def __init__(self, *args, **kwargs):
        ServiceProcess.__init__(self, *args, **kwargs)

        # mapping of queue name -> list of bindings (station ids/sensor ids)
        self.routing = {}
        # mapping of known worker VM id -> info about that VM (cores / running instances)
        self.workers = {}

        # get config for the current exchange setup from the exchange space;
        # queue config matches what TopicWorkerReceiver (below) uses
        exchcnfg = self.container.exchange_manager.exchange_space.exchange
        msgcnfg = messaging.worker('temp')

        # for timing
        self._timer = time.time()

        # for reconfigure events
        self._reconfigure_timeout = None

        # provisioner vars are common vars for all worker instances
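        # (each sqlt_vars key below is a $var substitution point in the SQL
        # templates; see op_set_sql_defs for the documented list)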
        self.prov_vars = {
            'sqlt_vars': {
                'inp_exchange': INP_EXCHANGE_NAME,
                'inp_exchange_type': exchcnfg.exchange_type,
                'inp_exchange_durable': str(exchcnfg.durable).lower(),
                'inp_exchange_autodelete': str(exchcnfg.auto_delete).lower(),
                'inp_queue_durable': msgcnfg['durable'],
                'inp_queue_autodelete': msgcnfg['auto_delete'],
                'det_topic': DETECTION_TOPIC,
                'det_exchange': OUT_EXCHANGE_NAME,
                'det_exchange_type': exchcnfg.exchange_type,
                'det_exchange_durable': str(exchcnfg.durable).lower(),
                'det_exchange_autodelete': str(exchcnfg.auto_delete).lower()
            }
        }

    @defer.inlineCallbacks
    def slc_init(self):
        # Service life cycle state.

        # consume the announcement queue
        self.announce_recv = TopicWorkerReceiver(name=ANNOUNCE_QUEUE,
                                                 scope='global',
                                                 process=self,
                                                 handler=self._recv_announce)

        # declares queue and starts listening on it
        yield self.announce_recv.attach()

        # get topic based routing to all sensor data (for anything missed on the announcement queue)
        #self.all_data_recv = TopicWorkerReceiver(name="ta_alldata",
        #                                         scope='global',
        #                                         binding_key = "ta.*.BHZ",
        #                                         process=self,
        #                                         handler=self._recv_data)

        #yield self.all_data_recv.attach()
        #yield self.all_data_recv.initialize()
        #self.counter = 0

        self.epu_controller_client = EPUControllerClient()

        self.attribute_store_client = AttributeStoreClient()
        yield self._load_sql_def()

    @defer.inlineCallbacks
    def _recv_announce(self, data, msg):
        """
        Received an instrument announcement. Set up a binding for it.
        """
        jsdata = json.loads(data)
        station_name = jsdata['content']

        log.info("Instrument Station Announce: " + station_name)

        found = self.has_station_binding(station_name)

        if found:
            log.error("Duplicate announcement")
        else:
            yield self.bind_station(station_name)

        yield msg.ack()

    #def _recv_data(self, data, msg):
    #    #log.info("<-- data packet" + msg.headers.__str__())
    #    log.info("data " + self.counter.__str__())
    #    self.counter += 1
    #    msg.ack()

    @defer.inlineCallbacks
    def bind_station(self, station_name, queue_name=None):
        """
        Binds a station to a queue. Typically you do not specify the queue name, this method
        will find a queue with room. If a queue name is given, no checking will be done - it 
        will simply be added.
        """
        if queue_name is None:
            queue_name = "W%s" % (len(self.routing) + 1)

            # prefer an existing queue with enough room
            for queue, stations in self.routing.items():
                if len(stations) < STATIONS_PER_QUEUE:
                    queue_name = queue
                    break

        binding_key = station_name

        yield self._create_queue(queue_name, binding_key)

        if not self.routing.has_key(queue_name):
            self.routing[queue_name] = []
            self.request_sqlstream(queue_name)

        self.routing[queue_name].append(station_name)

        log.info("Created binding %s to queue %s" % (binding_key, queue_name))

    @defer.inlineCallbacks
    def _create_queue(self, queue_name, binding_key):
        """
        Creates a queue and/or binding to a queue (just the binding if the queue exists).
        TODO: replace this with proper method of doing so.
        """
        recv = TopicWorkerReceiver(name=queue_name,
                                   scope='global',
                                   binding_key=binding_key,
                                   process=self)
        yield recv.initialize()  # creates queue but does not listen

    def request_sqlstream(self, queue_name, op_unit_id=None):
        """
        Requests a SQLStream operational unit to be created, or an additional
        SQLStream instance on an existing operational unit.
        @param queue_name   The queue the SQLStream instance should consume from.
        @param op_unit_id   The operational unit id on which to create a SQLStream
                            instance. If specified, the instance is always created
                            on that op unit. Otherwise, this method finds available
                            space on an existing VM or requests a new VM.
        """

        # if this var is true, at the end of this method, instead of reconfiguring via
        # the decision engine, we will directly ask the agent on op_unit_id to spawn the
        # sqlstream engine. This will hopefully be taken out when we can reconfigure
        # workers on the fly.
        direct_request = False

        if op_unit_id is not None and not self.workers.has_key(op_unit_id):
            log.error("request_sqlstream: op_unit (%s) requested but unknown" %
                      op_unit_id)

        if op_unit_id is None:
            # find an available op unit
            for (worker, info) in self.workers.items():
                availcores = info['metrics']['cores'] - (
                    len(info['sqlstreams']) * CORES_PER_SQLSTREAM)
                if availcores >= CORES_PER_SQLSTREAM:
                    log.info(
                        "request_sqlstream - asking existing operational unit (%s) to spawn new SQLStream"
                        % worker)
                    # Request spawn new sqlstream instance on this worker
                    # wait for rpc message to app controller that says sqlstream is up
                    op_unit_id = worker

                    direct_request = True

                    # record the fact we are using this worker now
                    # TODO : needs to be an integer to indicate number of starting up, or a
                    # unique key per each starter
                    #info['sqlstreams']['spawning'] = True
                    break

        if op_unit_id is None:
            op_unit_id = str(uuid.uuid4())[:8]
            log.info("request_sqlstream - requesting new operational unit %s" %
                     op_unit_id)

        # now we have an op_unit_id, update the config
        if not self.workers.has_key(op_unit_id):
            self.workers[op_unit_id] = {
                'metrics': {
                    'cores': 2
                },  # all workers should have at least two, will be updated when status is updated
                'state': '',
                'sqlstreams': {}
            }
            streamcount = 0
        else:
            streamcount = len(self.workers[op_unit_id]['sqlstreams'])

        ssid = str(streamcount + 1)

        stream_conf = {'sqlt_vars': {'inp_queue': queue_name}, 'ssid': ssid}

        self.workers[op_unit_id]['sqlstreams'][ssid] = {
            'conf': stream_conf,
            'state': ''
        }

        if direct_request:
            self._start_sqlstream(op_unit_id, stream_conf)
        else:
            self.request_reconfigure()  # schedule a reconfigure event!

    def request_reconfigure(self):
        """
        Rate limiter for actual request reconfigure call.
        Waits 4 seconds before sending; each additional reconfigure attempt during
        the wait resets the delay to another 4 seconds. When the timeout finally
        fires, the real reconfigure is sent.
        """
        if self._reconfigure_timeout is not None and self._reconfigure_timeout.active():
            log.info("request_reconfigure: delay already active, resetting to 4 seconds")
            self._reconfigure_timeout.reset(4)
        else:

            def callReconfigure():
                log.info("request_reconfigure: delay complete, actually performing reconfigure")
                self._reconfigure_timeout = None
                self._request_reconfigure()

            log.info("request_reconfigure: starting a 4 second delay to avoid flooding the EPU controller")
            self._reconfigure_timeout = reactor.callLater(4, callReconfigure)

    def _request_reconfigure(self):
        """
        Requests a reconfiguration from the Decision Engine. This takes care of provisioning
        workers.

        This method builds the JSON required to reconfigure/configure the decision engine.
        """

        # TODO: likely does not need to send prov vars every time as this is reconfigure

        provvars = self.prov_vars.copy()
        #provvars['sqldefs'] = provvars['sqldefs'].replace("$", "$$")    # escape template vars once so it doesn't get clobbered in provisioner replacement

        conf = {
            'preserve_n': len(self.workers),
            #PROVISIONER_VARS_KEY : self.prov_vars,
            'unique_instances': {}
        }

        for (wid, winfo) in self.workers.items():
            conf['unique_instances'][wid] = {'agent_args': {'sqlstreams': []}}
            conf['unique_instances'][wid]['agent_args'].update(self.prov_vars)
            ssdefs = conf['unique_instances'][wid]['agent_args']['sqlstreams']
            for (ssid, ssinfo) in winfo['sqlstreams'].items():
                ssdefs.append({
                    'ssid': ssinfo['conf']['ssid'],
                    'sqlt_vars': ssinfo['conf']['sqlt_vars']
                })

        if DEBUG_WRITE_PROV_JSON:
            f = open('/tmp/prov.json', 'w')
            json.dump(conf, f, indent=1)
            f.close()
            log.debug(
                "Wrote /tmp/prov.json due to DEBUG_WRITE_PROV_JSON being on in the config."
            )

            for (wid, winfo) in conf['unique_instances'].items():
                # copy agent_args so adding opunit_id doesn't mutate the conf
                # that is sent to the EPU controller below
                wdict = {'agent_args': dict(winfo['agent_args'])}
                wdict['agent_args']['opunit_id'] = wid

                f = open('/tmp/sa-' + wid + '.json', 'w')
                json.dump(wdict, f, indent=1)
                f.close()

                log.debug("Wrote /tmp/sa-%s.json." % wid)

            # merge and write individual worker configs while we're at it
            #for (wid, winfo) in self.workers.items():
            #    wdict = { 'agent_args': { 'opunit_id' : wid,
            #                              'sqlstreams': str(conf['unique_instances'][wid]['sqlstreams']),   # TODO: unstringify this
            #                              'sqlt_vars' : self.prov_vars['sqlt_vars'] } }

            #    f = open('/tmp/sa-' + wid + '.json', 'w')
            #    json.dump(wdict, f, indent=1)
            #    f.close()

        self.epu_controller_client.reconfigure(conf)

        # record the time we sent this
        self._timer = time.time()

    def has_station_binding(self, station_name):
        """
        Returns true if we know about this station.
        """
        for stations in self.routing.values():
            if station_name in stations:
                return True

        return False

    def op_opunit_status(self, content, headers, msg):
        """
        Handles an application agent reporting an operational unit's status.
        Details include its current state, metrics about the system, status of
        SQLstream instances.
        """
        self._update_opunit_status(content)
        self.reply_ok(msg, {'value': 'ok'}, {})

    def request_opunit_status(self, opunit_id):
        """
        Asks an AppAgent to report its status.
        """
        proc_id = self.workers[opunit_id]['proc_id']
        d = self.rpc_send(proc_id, 'get_opunit_status', {})
        d.addCallback(lambda res: self._update_opunit_status(res[0]))

    def _update_opunit_status(self, status):
        """
        Internal method to handle updating an op unit's status.
        Status updates can either come from heartbeats initiated by the AppAgent, or
        on request from the AppController. This method handles both of those.
        """
        opunit_id = status['id']
        proc_id = status['proc_id']
        state = status['state']
        metrics = status['metrics']
        sqlstreams = status['sqlstreams']

        sstext = ""
        for ssid, sinfo in sqlstreams.items():
            sstext += "(id: %s status: %s queue: %s)" % (ssid, sinfo['state'],
                                                         sinfo['inp_queue'])

        # get amount of time since we requested opunits
        timediff = time.time() - self._timer

        log.info(
            "Op Unit (%s) status update (+%s sec) : state (%s), sqlstreams (%d): %s"
            % (opunit_id, str(timediff), state, len(sqlstreams), sstext))

        if not self.workers.has_key(opunit_id):
            self.workers[opunit_id] = {}

        self.workers[opunit_id].update({
            'metrics': metrics,
            'state': state,
            'proc_id': proc_id,
            'sqlstreams': sqlstreams
        })

        # display a message when every known sqlstream instance reports running
        allstate = [
            ssinfo.get('state', None)
            for winfo in self.workers.values()
            for ssinfo in winfo['sqlstreams'].values()
        ]
        if set(allstate) == set(["SUCCESS"]):
            log.info("All known workers are running (+%s sec)" % timediff)

    def _start_sqlstream(self, op_unit_id, conf):
        """
        Tells an op unit to start a SQLStream instance.
        """
        proc_id = self.workers[op_unit_id]['proc_id']
        self.rpc_send(proc_id, 'start_sqlstream', conf)

    def _load_sql_def(self):
        """
        Loads SQL Templates from disk and puts them in a store.
        Called at startup.

        XXX fix:
        Gets SQLStream detection application SQL definitions, either from
        disk or in memory. SQL files stored on disk are loaded once and stored
        in memory after they have been translated through string.Template.

        You may override the SQL defs by sending an RPC message ("set_sql_defs") to
        the Application Controller. These defs will take the place of the current
        in memory defs. They are expected to be templates, in which certain vars will be
        updated. See op_set_sql_defs for more information.
        """
        fulltemplatelist = []
        for filename in ["catalog.sqlt", "funcs.sqlt", "detections.sqlt"]:
            f = resource_stream(__name__, "data/%s" % filename)
            #f = open(os.path.join(os.path.dirname(__file__), "app_controller_service", filename), "r")
            fulltemplatelist.extend(f.readlines())
            f.close()

        fulltemplate = "".join(fulltemplatelist)

        self.attribute_store_client.put(SQLTDEFS_KEY, fulltemplate)

    def op_set_sql_defs(self, content, headers, msg):
        """
        Updates the current cached SQL defs for the SQLStream detection application.
        This overrides what is found on the disk.

        Note it does not update the SQL files on disk, so if the AppControllerService is
        restarted, it will need to be updated with the current defs again.

        This method expects content to contain a single key, also named 'content',
        holding a full SQL definition (the concatenation of "catalog.sqlt",
        "funcs.sqlt", and "detections.sqlt") with Python string.Template vars as
        substitution points for the following variables:

        * inp_queue                 - The input queue name to read messages from.
        * inp_queue_autodelete      - The input queue's auto_delete setting.
        * inp_queue_durable         - The input queue's durable setting.
        * inp_exchange              - The exchange where the input queue resides.
        * inp_exchange_type         - The exchange's type (topic/fanout/direct).
        * inp_exchange_durable      - The exchange's durable setting.
        * inp_exchange_autodelete   - The exchange's auto_delete setting.
        * det_topic                 - The topic string that should be used for detections.
        * det_exchange              - The exchange where detections should be published.
        * det_exchange_type         - The detection exchange's type (topic/fanout/direct).
        * det_exchange_durable      - The detection exchange's durable setting.
        * det_exchange_autodelete   - The detection exchange's auto_delete setting.

        If these variables are not present, no error is thrown - it will use whatever you
        gave it. So your updated SQL definitions may hardcode the variables above.
        """
        defs = content['content']
        self.attribute_store_client.put(SQLTDEFS_KEY, defs)
        self.reply_ok(msg, {'value': 'ok'}, {})
Example #4
class AppAgent(Process):
    """
    Application Agent - lives on the op unit, communicates status with the app
    controller, and receives instructions.
    """
    def __init__(self, receiver=None, spawnargs=None, **kwargs):
        """
        Constructor.
        Gathers information about the system this agent is running on.
        """
        Process.__init__(self,
                         receiver=receiver,
                         spawnargs=spawnargs,
                         **kwargs)

        if not isinstance(self.spawn_args, dict):
            self.spawn_args = {}

        if not isinstance(self.spawn_args.get("agent_args"), dict):
            self.spawn_args["agent_args"] = {}

        # if an opunit id wasn't assigned in the spawn args, make one up so we
        # can still report in to the app controller
        self._opunit_id = self.spawn_args["agent_args"].get(
            "opunit_id", str(uuid.uuid4())[:8])

        self.metrics = {'cores': self._get_cores()}
        self.sqlstreams = {}
        self._fsm_factory_class = kwargs.pop("fsm_factory", SSFSMFactory)

        # for time metrics
        self._timer = time.time()

    @defer.inlineCallbacks
    def plc_init(self):
        self.target = self.get_scoped_name('system', "app_controller")

        self.attribute_store_client = AttributeStoreClient()

        # take note of time
        self._timer = time.time()

        # check spawn args for sqlstreams, start them up as appropriate
        if self.spawn_args.has_key('agent_args') and \
           self.spawn_args['agent_args'].has_key('sqlstreams'):
            sqlstreams = self.spawn_args['agent_args']['sqlstreams']

            for ssinfo in sqlstreams:
                ssid = ssinfo['ssid']
                inp_queue = ssinfo['sqlt_vars']['inp_queue']
                defs = yield self._get_sql_defs(uconf=ssinfo['sqlt_vars'])

                self.start_sqlstream(ssid, inp_queue, defs)

        # let the controller know we're starting (and may have sqlstreams
        # starting); we use callLater so this process can transition out of the
        # init state first
        reactor.callLater(0, self.opunit_status)

    @defer.inlineCallbacks
    def plc_terminate(self):
        """
        Termination of this App Agent process.
        Attempts to shut down sqlstream clients and sqlstream daemon instances cleanly. If
        they exceed a timeout, they are shut down forcefully.
        """
        # kill clients first; they prevent the sqlstream daemons from shutting down
        yield self.kill_sqlstream_clients()
        yield self.kill_sqlstreams()
        yield self.opunit_status(BasicStates.S_TERMINATED)

    def kill_sqlstreams(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.get('_serverproc') is not None:
                dl.append(self.kill_sqlstream(sinfo['ssid']))

        deflist = defer.DeferredList(dl)
        return deflist

    def kill_sqlstream(self, ssid):
        """
        Shuts down and deletes a single SQLstream instance.
        @return A deferred which will be called back when the steps to stop and delete a SQLstream
                instance are complete.
        """
        return self.sqlstreams[ssid]['_fsm'].run_to_state(SSStates.S_INIT)

    def kill_sqlstream_clients(self):
        dl = []
        for sinfo in self.sqlstreams.values():
            if sinfo.get('_taskchain') is not None:
                dl.append(sinfo['_taskchain'].close())

        deflist = defer.DeferredList(dl)
        return deflist

    def _get_sql_pumps_on(self):
        sql_cmd = """
                  ALTER PUMP "SignalsPump" START;
                  ALTER PUMP "DetectionsPump" START;
                  ALTER PUMP "DetectionMessagesPump" START;
                  """
        return sql_cmd

    def _get_sql_pumps_off(self):
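        # stop the pumps in the reverse of the order they are started in
        # _get_sql_pumps_on (downstream message pump first)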
        sql_cmd = """
                  ALTER PUMP "DetectionMessagesPump" STOP;
                  ALTER PUMP "DetectionsPump" STOP;
                  ALTER PUMP "SignalsPump" STOP;
                  """
        return sql_cmd

    @defer.inlineCallbacks
    def _get_sql_defs(self, sqldefs=None, uconf=None, **kwargs):
        """
        Returns a fully substituted SQLStream SQL definition string.
        Using keyword arguments, you can update the default params passed in via
        spawn args.
        """
        uconf = uconf or {}  # avoid the mutable default argument pitfall
        assert (self.spawn_args.has_key('agent_args')
                and self.spawn_args['agent_args'].has_key('sqlt_vars')), \
            "Required SQL substitution vars have not been set, or no 'agent_args' key is present in the spawn args to this AppAgent."

        spawn_conf = self.spawn_args['agent_args']['sqlt_vars'].copy()

        # get connection details to broker
        cnfgsrc = self.container.exchange_manager.exchange_space.message_space.connection

        conf = {
            'server_host': cnfgsrc.hostname,
            'server_port': cnfgsrc.port,
            'server_user': cnfgsrc.userid,
            'server_password': cnfgsrc.password,
            'server_vhost': cnfgsrc.virtual_host
        }

        # update basic connection info with items from the spawn args to this AppAgent
        conf.update(spawn_conf)
        # update config with arguments passed in via the uconf param
        conf.update(uconf)
        # update config with any additional keyword args
        conf.update(kwargs)

        defs = sqldefs
        if defs is None:
            # no defs passed in; pull them from the attribute store
            defs = yield self.attribute_store_client.get(SQLTDEFS_KEY)

        assert defs is not None and len(defs) > 0, "No definitions found!"

        template = string.Template(defs)

        defer.returnValue(template.substitute(conf))

    def _get_cores(self):
        """
        Gets the number of processors/cores on the current system.
        Adapted from http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
        """
        if NO_MULTIPROCESSING:
            if hasattr(os, "sysconf"):
                if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
                    # linux + unix
                    ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
                    if isinstance(ncpus, int) and ncpus > 0:
                        return ncpus
                else:
                    # osx
                    return int(os.popen2("sysctl -n hw.ncpu")[1].read())

            return 1
        else:
            return multiprocessing.cpu_count()

    def _get_opunit_status(self, cur_state=None):
        """
        Builds this Agent's status.
        @param cur_state    The current state that should be reported. Expected to be
                            any state of the BasicLifecycleObject states. If left blank,
                            uses the current process' state. This param is used for when
                            reporting state from a state transition method, such as
                            plc_terminate, and that state hasn't taken effect yet, but
                            we want to report as such.
        @returns            A dict containing status.
        """
        status = {
            'id': self._opunit_id,
            'proc_id': self.id.full,
            'metrics': self.metrics,
            'state': cur_state or self._StateObject__fsm.current_state
        }

        # filter out any private vars to the Agent inside of the sqlstream dict
        # "private" vars start with _ in the key name
        sqlstreams = {}
        for ssid, sinfo in self.sqlstreams.items():
            sqlstreams[ssid] = {}
            if sinfo.has_key('_fsm'):
                sqlstreams[ssid]['state'] = sinfo['_fsm'].current_state
            else:
                sqlstreams[ssid]['state'] = "?"
            for k, v in sinfo.items():
                if k.startswith("_"):
                    continue
                sqlstreams[ssid][k] = v

        status['sqlstreams'] = sqlstreams

        return status

    def op_get_opunit_status(self, content, headers, msg):
        """
        Handles a request from the AppController to give it status of this AppAgent.
        """
        status = self._get_opunit_status()
        self.reply_ok(msg, status, {})

    def opunit_status(self, cur_state=None):
        """
        Sends the current status of this Agent/Op Unit.
        """
        content = self._get_opunit_status(cur_state)
        return self.rpc_send(self.target, 'opunit_status', content)

    @defer.inlineCallbacks
    def op_start_sqlstream(self, content, headers, msg):
        """
        Begins the process of starting and configuring a SQLStream instance on this op unit.
        The app controller calls here when it determines the op unit should spawn a new
        processing SQLStream.
        """
        log.info("op_start_sqlstream")  # : %s" % str(self.sqlstreams[ssid]))

        ssid = content['ssid']
        sqlt_vars = content['sqlt_vars']
        defs = yield self._get_sql_defs(uconf=sqlt_vars)
        failed = False
        ex = None

        try:
            self.start_sqlstream(ssid, sqlt_vars['inp_queue'], defs)
        except ValueError as e:
            failed = True
            ex = e

        if failed:
            resp = {'response': 'failed', 'exception': ex}
        else:
            resp = {'response': 'ok'}

        yield self.reply_ok(msg, resp, {})
Example #5
    @defer.inlineCallbacks
    def test_put_common_backend(self):
        # Test with the Cassandra store backend, where both services can access common values.
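        # NOTE: this is an integration test; it requires the Cassandra host
        # configured below (amoeba.ucsd.edu:9160) to be reachable.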
        services = [
            {
                'name': 'Junk1',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as1',  # this is the name of the instance!
                    'backend_class':
                    'ion.data.backends.cassandra.CassandraStore',
                    'backend_args': {
                        'cass_host_list': ['amoeba.ucsd.edu:9160'],
                        'keyspace': 'Datastore',
                        'colfamily': 'DS1',
                        'cf_super': True,
                        'namespace': 'ours',
                        'key': 'Junk'
                    }
                }
            },
            {
                'name': 'Junk2',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as2',  # this is the name of the instance!
                    'backend_class':
                    'ion.data.backends.cassandra.CassandraStore',
                    'backend_args': {
                        'cass_host_list': ['amoeba.ucsd.edu:9160'],
                        'keyspace': 'Datastore',
                        'colfamily': 'DS1',
                        'cf_super': True,
                        'namespace': 'ours',
                        'key': 'Junk'
                    }
                }
            }
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1', 'value1')
        logging.info('Result1 put: ' + str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: ' + str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1', 'value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        tres1 = yield asc2.put('tkey1', 'tvalue1')
        logging.info('tResult1 put: ' + str(tres1))

        tres2 = yield asc2.get('tkey1')
        logging.info('tResult2 get: ' + str(tres2))
        self.assertEqual(tres2, 'tvalue1')

        # Let Cassandra register the new entry (yield so the test actually waits)
        yield pu.asleep(5)

        # With common backends the value should be found.
        resx1 = yield asc2.get('key1')
        self.assertEqual(
            resx1,
            'value2',
            msg='Failed to pull value from second service instance')

        yield asc1.clear_store()
        yield asc2.clear_store()
Example #6
    @defer.inlineCallbacks
    def test_put_seperate_backend(self):
        # Test with separate store backends
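        # Each service instance gets its own in-memory ion.data.store.Store here,
        # so a value written via 'as1' should not be visible via 'as2'.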
        services = [
            {
                'name': 'attstore1',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as1',
                    'backend_class': 'ion.data.store.Store',
                    'backend_args': {}
                }
            },
            {
                'name': 'attstore2',
                'module': 'ion.services.coi.attributestore',
                'class': 'AttributeStoreService',
                'spawnargs': {
                    'servicename': 'as2',
                    'backend_class': 'ion.data.store.Store',
                    'backend_args': {}
                }
            },
        ]

        sup = yield self._spawn_processes(services)

        asc1 = AttributeStoreClient(proc=sup, targetname='as1')

        res1 = yield asc1.put('key1', 'value1')
        logging.info('Result1 put: ' + str(res1))

        res2 = yield asc1.get('key1')
        logging.info('Result2 get: ' + str(res2))
        self.assertEqual(res2, 'value1')

        res3 = yield asc1.put('key1', 'value2')

        res4 = yield asc1.get('key1')
        self.assertEqual(res4, 'value2')

        res5 = yield asc1.get('non_existing')
        self.assertEqual(res5, None)

        asc2 = AttributeStoreClient(proc=sup, targetname='as2')

        # With separate backends this should return None
        resx1 = yield asc2.get('key1')
        self.assertEqual(resx1, None)

        yield asc1.clear_store()
        yield asc2.clear_store()