def start_client(self, name, module, cls, config, resource_id, deploy_file = DEFAULT_DEPLOY, bootmode=None):
    """
    @brief Start up the instrument agent client
    """
    # The container must be up before anything can be spawned into it.
    self.start_container(deploy_file=deploy_file)

    log.debug("Starting Agent Client")
    cc_client = ContainerAgentClient(node=self.container.node,
                                     to_name=self.container.name)

    # Work on a copy so the caller's config dict is never mutated.
    spawn_config = deepcopy(config)
    spawn_config['bootmode'] = bootmode

    log.debug("Agent pid: %s", self.instrument_agent_pid)
    log.debug("Bootmode: %s", bootmode)
    log.debug("Agent config: %s", spawn_config)

    # Re-use the previous pid (if any) so a restart keeps the same process id.
    self.instrument_agent_pid = cc_client.spawn_process(
        name=name,
        module=module,
        cls=cls,
        config=spawn_config,
        process_id=str(self.instrument_agent_pid))
    log.info('Agent pid=%s.', self.instrument_agent_pid)

    # Resource agent client for talking to the spawned agent.
    agent_client = ResourceAgentClient(resource_id, process=FakeProcess())
    log.info('Got ia client %s.', str(agent_client))
    self.instrument_agent_client = agent_client
class TestNodeBInt(IonIntegrationTestCase):
    """Integration tests for the node's bidirectional channel pool health check."""

    def setUp(self):
        # Container + agent client; keep a direct handle on the node so the
        # tests can inspect its private channel pools.
        self._start_container()
        self.ccc = ContainerAgentClient(to_name=self.container.name)
        self.node = self.container.node
        # Keep RecvChannel queues from auto-deleting for the test's duration.
        patcher = patch('pyon.net.channel.RecvChannel._queue_auto_delete', False)
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_pool_health_check(self):
        """A channel corrupted at the pika layer is replaced and moved to the dead pool."""
        # make a request, thus making a bidir item
        self.ccc.status()
        self.assertEquals(1, len(self.node._bidir_pool))
        curpoolchids = [o.get_channel_id() for o in self.node._bidir_pool.itervalues()]

        # fake that this channel has been corrupted in pika: delete pika's
        # deliver callback for the channel so the health check sees it broken.
        ch = self.node._bidir_pool.values()[0]
        chnum = ch.get_channel_id()
        del self.node.client.callbacks._callbacks[chnum]['_on_basic_deliver']

        # make another request
        self.ccc.status()

        # should have killed our last channel, gotten a new one:
        # pool size unchanged, but the channel ids differ and the corrupted
        # channel now lives in the dead pool.
        self.assertEquals(1, len(self.node._bidir_pool))
        self.assertNotEquals(curpoolchids, [o.get_channel_id() for o in self.node._bidir_pool.itervalues()])
        self.assertNotIn(ch, self.node._bidir_pool.itervalues())
        self.assertIn(ch, self.node._dead_pool)
def setUp(self):
    """Bring up a container and boot the r2deploy services."""
    self._start_container()

    # Use the container agent endpoint to launch the deploy file.
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2deploy.yml')
def start_fake_instrument_agent(container, stream_config=None, message_headers=None):
    """
    Spawn a fake instrument agent process in *container* and return a client.

    @param container the pyon container to spawn the agent into
    @param stream_config stream configuration dict for the agent; defaults to
        an empty dict (a fresh one is created per call)
    @param message_headers optional headers forwarded to spawn_process
    @retval ResourceAgentClient bound to IA_RESOURCE_ID
    """
    # FIX: the original used a mutable default argument ({}), which is shared
    # across calls; default to None and build a fresh dict instead.
    if stream_config is None:
        stream_config = {}

    # Create agent config.
    agent_config = {
        'driver_config' : DVR_CONFIG,
        'stream_config' : stream_config,
        'agent' : {'resource_id': IA_RESOURCE_ID},
        'test_mode' : True
    }

    # Start instrument agent.
    log.debug("TestInstrumentAgent.setup(): starting IA.")
    container_client = ContainerAgentClient(node=container.node,
                                            name=container.name)

    ia_pid = container_client.spawn_process(name=IA_NAME,
                                            module=IA_MOD,
                                            cls=IA_CLS,
                                            config=agent_config,
                                            headers=message_headers)
    log.info('Agent pid=%s.', str(ia_pid))

    # Start a resource agent client to talk with the instrument agent.
    ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info('Got ia client %s.', str(ia_client))
    return ia_client
def setUp(self):
    """
    Initialize test members.
    Start port agent.
    Start container and client.
    Start streams and subscribers.
    Start agent, client.
    """
    TrhphTestCase.setUp(self)

    # Start port agent, add stop to cleanup.
    self._pagent = None
    self._start_pagent()
    self.addCleanup(self._stop_pagent)

    # Start container.
    self._start_container()

    # Bring up services in a deploy file (no need to message)
    self.container.start_rel_from_url("res/deploy/r2dm.yml")

    # Start data subscribers, add stop to cleanup.
    # NOTE(review): _stream_config starts empty and is passed to the agent
    # below; presumably _start_data_subscribers() populates it — confirm.
    self._no_samples = None
    self._async_data_result = AsyncResult()
    self._data_greenlets = []
    self._stream_config = {}
    self._samples_received = []
    self._data_subscribers = []
    self._start_data_subscribers()
    self.addCleanup(self._stop_data_subscribers)

    # Start event subscribers, add stop to cleanup.
    self._no_events = None
    self._async_event_result = AsyncResult()
    self._events_received = []
    self._event_subscribers = []
    self._start_event_subscribers()
    self.addCleanup(self._stop_event_subscribers)

    # Create agent config.
    agent_config = {
        "driver_config": DVR_CONFIG,
        "stream_config": self._stream_config,
        "agent": {"resource_id": IA_RESOURCE_ID},
        "test_mode": True,
    }

    # Start instrument agent.
    self._ia_pid = None
    log.debug("TestInstrumentAgent.setup(): starting IA.")
    container_client = ContainerAgentClient(node=self.container.node,
                                            name=self.container.name)

    self._ia_pid = container_client.spawn_process(name=IA_NAME,
                                                  module=IA_MOD,
                                                  cls=IA_CLS,
                                                  config=agent_config)
    log.info("Agent pid=%s.", str(self._ia_pid))

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = None
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info("Got ia client %s.", str(self._ia_client))
def start_client(self, name, module, cls, config, resource_id, deploy_file = DEFAULT_DEPLOY, message_headers=None, bootmode=None):
    """
    @brief Start up the instrument agent client
    """
    # Container first — nothing can be spawned without it.
    self.start_container(deploy_file=deploy_file)

    log.debug("Starting Agent Client")
    cc_client = ContainerAgentClient(node=self.container.node,
                                     to_name=self.container.name)

    # Copy so the caller's config dict is never mutated.
    spawn_config = deepcopy(config)
    spawn_config['bootmode'] = bootmode
    log.debug("Agent config: %s", spawn_config)

    agent_pid = cc_client.spawn_process(
        name=name,
        module=module,
        cls=cls,
        config=spawn_config,
        headers=message_headers)
    log.info('Agent pid=%s.', agent_pid)

    # Resource agent client for talking to the spawned agent.
    agent_client = ResourceAgentClient(resource_id, process=FakeProcess())
    log.info('Got ia client %s.', str(agent_client))
    self.instrument_agent_client = agent_client
def test_bank(self): # Start container self._start_container() # Establish endpoint with container container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) container_client.start_rel_from_url('res/deploy/examples/bank_complete.yml') # Now create client to bank service client = BankServiceClient(node=self.container.node) # Send some requests print 'Creating savings account' savingsAcctNum = client.new_account('kurt', 'Savings') print "New savings account number: " + str(savingsAcctNum) print "Starting savings balance %s" % str(client.get_balances(savingsAcctNum)) client.deposit(savingsAcctNum, 99999999) print "Savings balance after deposit %s" % str(client.get_balances(savingsAcctNum)) client.withdraw(savingsAcctNum, 1000) print "Savings balance after withdrawl %s" % str(client.get_balances(savingsAcctNum)) print "Buying 1000 savings bonds" client.buy_bonds(savingsAcctNum, 1000) print "Savings balance after bond purchase %s" % str(client.get_balances(savingsAcctNum)) checkingAcctNum = client.new_account('kurt', 'Checking') print "New checking account number: " + str(checkingAcctNum) print "Starting checking balance %s" % str(client.get_balances(checkingAcctNum)) client.deposit(checkingAcctNum, 99999999) print "Confirming checking balance after deposit %s" % str(client.get_balances(checkingAcctNum)) client.withdraw(checkingAcctNum, 1000) print "Confirming checking balance after withdrawl %s" % str(client.get_balances(checkingAcctNum)) acctList = client.list_accounts('kurt') self.assertTrue(len(acctList) == 2)
def start_instrument_agent_process(container, stream_config=None, resource_id=IA_RESOURCE_ID, resource_name=IA_NAME, org_name=None, message_headers=None):
    """
    Spawn an instrument agent process in *container* and return a client.

    @param container pyon container to spawn into
    @param stream_config stream configuration dict for the agent; defaults to
        an empty dict (a fresh one is created per call)
    @param resource_id resource id the agent represents
    @param resource_name process name for the spawned agent
    @param org_name optional org name added to the agent config
    @param message_headers optional headers forwarded to spawn_process
    @retval ResourceAgentClient bound to *resource_id*
    """
    # FIX: the original used a shared mutable default ({}); use None and
    # build a fresh dict per call instead. Also removed a leftover
    # log.info("foobar") debug statement.
    if stream_config is None:
        stream_config = {}

    # Create agent config.
    agent_config = {
        'driver_config' : DVR_CONFIG,
        'stream_config' : stream_config,
        'agent' : {'resource_id': resource_id},
        'test_mode' : True
    }

    if org_name is not None:
        agent_config['org_name'] = org_name

    # Start instrument agent.
    log.info("TestInstrumentAgent.setup(): starting IA.")
    container_client = ContainerAgentClient(node=container.node,
                                            name=container.name)
    log.info("Agent setup")

    ia_pid = container_client.spawn_process(name=resource_name,
                                            module=IA_MOD,
                                            cls=IA_CLS,
                                            config=agent_config,
                                            headers=message_headers)
    log.info('Agent pid=%s.', str(ia_pid))

    # Start a resource agent client to talk with the instrument agent.
    ia_client = ResourceAgentClient(resource_id, process=FakeProcess())
    log.info('Got ia client %s.', str(ia_client))
    return ia_client
def stop_client(self):
    """Terminate the agent process (if any) and drop the client reference."""
    if self.instrument_agent_pid:
        log.debug("Stopping agent client.")
        cc = ContainerAgentClient(node=self.container.node,
                                  name=self.container.name)
        cc.terminate_process(self.instrument_agent_pid)

    if self.instrument_agent_client:
        self.instrument_agent_client = None
def test_start_hello(self):
    """Spawn the example HelloService over messaging."""
    self._start_container()

    agent_client = ContainerAgentClient(node=self.container.node, name=self.container.name)

    pid = agent_client.spawn_process('hello', 'examples.service.hello_service', 'HelloService')
def setUp(self):
    """Start a container, grab its agent client and node, and pin queues."""
    self._start_container()
    self.ccc = ContainerAgentClient(to_name=self.container.name)
    self.node = self.container.node

    # Keep RecvChannel queues from auto-deleting while the test runs.
    auto_delete_patch = patch('pyon.net.channel.RecvChannel._queue_auto_delete', False)
    auto_delete_patch.start()
    self.addCleanup(auto_delete_patch.stop)
def start_agent(self):
    """
    Start an instrument agent and client.
    """
    log.info('Creating driver integration test support:')
    log.info('driver module: %s', DRV_MOD)
    log.info('driver class: %s', DRV_CLS)
    log.info('device address: %s', DEV_ADDR)
    log.info('device port: %s', DEV_PORT)
    log.info('log delimiter: %s', DELIM)
    log.info('work dir: %s', WORK_DIR)
    self._support = DriverIntegrationTestSupport(DRV_MOD, DRV_CLS, DEV_ADDR,
                                                 DEV_PORT, DATA_PORT, CMD_PORT,
                                                 PA_BINARY, DELIM, WORK_DIR)

    # Port agent first, with teardown registered.
    pagent_port = self._support.start_pagent()
    log.info('Port agent started at port %i', pagent_port)
    self.addCleanup(self._support.stop_pagent)

    # Point the driver comms at the port agent we just started.
    DVR_CONFIG['comms_config'] = {
        'addr': 'localhost',
        'port': pagent_port,
        'cmd_port': CMD_PORT
    }

    # Agent configuration.
    agent_config = {
        'driver_config': DVR_CONFIG,
        'stream_config': {},
        'agent': {'resource_id': IA_RESOURCE_ID},
        'test_mode': True
    }

    # Spawn the instrument agent in the container.
    log.debug("Starting IA.")
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    agent_pid = cc.spawn_process(name=IA_NAME, module=IA_MOD, cls=IA_CLS, config=agent_config)
    log.info('Agent pid=%s.', str(agent_pid))

    # Resource agent client for talking to the instrument agent.
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info('Got ia client %s.', str(self._ia_client))
def main():
    """CLI entry point: send one command to a running container agent.

    Reads connection parameters from a msgpack-encoded pidfile, issues the
    request over messaging, and for 'status' maps a non-RUNNING reply to
    exit code 2.
    """
    parser = argparse.ArgumentParser(description="ScionCC Control")
    parser.add_argument(
        "pidfile",
        help="pidfile to use. If not specified, uses the first one found.")
    parser.add_argument("command",
                        help="command to send to the container agent",
                        choices=IContainerAgent.names())
    parser.add_argument("commandargs",
                        metavar="arg",
                        nargs="*",
                        help="arguments to the command being sent")
    opts = parser.parse_args()

    pidfile = opts.pidfile
    if not pidfile:
        raise Exception("No pidfile specified")

    # Pidfile content is a msgpack map of messaging/agent parameters.
    parms = {}
    with open(pidfile, 'r') as pf:
        parms = msgpack.loads(pf.read())

    assert parms, "No content in pidfile"

    bootstrap_pyon()
    node, ioloop = make_node(parms['messaging'])
    node.setup_interceptors(CFG.interceptor)
    cc = ContainerAgentClient(node=node,
                              to_name=(parms['container-xp'], parms['container-agent']))

    # make a manual call - this is to avoid having to have the IonObject for the call
    methdefs = [x[1] for x in IContainerAgent.namesAndDescriptions() if x[0] == opts.command]
    assert len(methdefs) == 1

    # Map positional CLI args onto the interface method's parameter names.
    arg_names = methdefs[0].positional    # ('name', 'module', 'cls', 'config')
    msg_args = dict(zip(arg_names, opts.commandargs))    # ('name', <usrinp1>, 'cls', <usrinp2>) -> { 'name' : <usrinp1>, 'cls': <usrinp2> }

    retval = cc.request(msg_args, op=opts.command)

    # special case: status — non-RUNNING exits with code 2.
    if opts.command == "status":
        statstr = retval
        print "Status:", statstr
        if statstr != "RUNNING":
            node.client.close()
            sys.exit(2)
    else:
        print "Returned", retval
    node.client.close()
def _action_spawn_process(self, action_kwargs):
    """Spawn a process on the remote container agent named in *action_kwargs*."""
    cc = ContainerAgentClient(to_name=action_kwargs["cc_agent"])
    return cc.spawn_process(
        action_kwargs["proc_name"],
        action_kwargs["module"],
        action_kwargs["cls"],
        action_kwargs["config"])
def _stop_agent(self):
    """Terminate the instrument agent process and drop its client."""
    if self._ia_pid:
        cc = ContainerAgentClient(node=self.container.node,
                                  name=self.container.name)
        cc.terminate_process(self._ia_pid)

    if self._ia_client:
        self._ia_client = None
def setUp(self):
    """Boot the COI services and create a datastore service client."""
    # Start container
    self._start_container()

    # Container agent endpoint launches the deployment.
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url("res/deploy/r2coi.yml")

    self.datastore_service = DatastoreServiceClient(node=self.container.node)
def setUp(self):
    """Boot the COI services and create a resource registry client."""
    # Start container
    self._start_container()

    # Container agent endpoint launches the deployment.
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2coi.yml')

    self.resource_registry_service = ResourceRegistryServiceClient(node=self.container.node)
def setUp(self):
    """Boot the COI services and create an org management client."""
    # Start container
    self._start_container()

    # Container agent endpoint launches the deployment.
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2coi.yml')

    self.org_management_service = OrgManagementServiceClient(node=self.container.node)
def setUp(self):
    """Boot r2cei, then spawn an ExecutionEngineAgent (supd launch) plus clients."""
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2cei.yml')

    self.resource_id = "eeagent_1234"
    self._eea_name = "eeagent"

    # Scratch directory for supervisord state.
    # NOTE(review): not removed here — presumably cleaned up elsewhere; confirm.
    self.supd_directory = tempfile.mkdtemp()

    self.agent_config = {
        'eeagent': {
            # NOTE(review): heartbeat 0 presumably disables periodic
            # heartbeating for the test — confirm against EEAgent docs.
            'heartbeat': 0,
            'slots': 100,
            'name': 'pyon_eeagent',
            'launch_type': {
                'name': 'supd',
                'pyon_directory': os.getcwd(),
                'supd_directory': self.supd_directory,
                'supdexe': 'bin/supervisord'
            },
        },
        'agent': {
            'resource_id': self.resource_id
        },
        'logging': {
            'loggers': {
                'eeagent': {
                    'level': 'DEBUG',
                    'handlers': ['console']
                }
            },
            'root': {
                'handlers': ['console']
            },
        }
    }

    # Start eeagent.
    self._eea_pid = None
    self.container_client = ContainerAgentClient(node=self.container.node,
                                                 name=self.container.name)
    self._eea_pid = self.container_client.spawn_process(
        name=self._eea_name,
        module="ion.agents.cei.execution_engine_agent",
        cls="ExecutionEngineAgent",
        config=self.agent_config)
    log.info('Agent pid=%s.', str(self._eea_pid))

    # Start a resource agent client to talk with the instrument agent.
    self._eea_pyon_client = SimpleResourceAgentClient(
        self.resource_id, process=FakeProcess())
    log.info('Got eea client %s.', str(self._eea_pyon_client))
    self.eea_client = ExecutionEngineAgentClient(self._eea_pyon_client)
def setUp(self):
    """Boot r2cei, register a test process definition, then spawn the HA agent."""
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2cei.yml')

    #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
    self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

    # Definition of the process the HA agent will manage.
    self.process_definition_id = uuid4().hex
    self.process_definition_name = 'test'
    self.process_definition = ProcessDefinition(name=self.process_definition_name,
                                                executable={
                                                    'module': 'ion.agents.cei.test.test_haagent',
                                                    'class': 'TestProcess'
                                                })
    self.pd_cli.create_process_definition(self.process_definition,
                                          self.process_definition_id)

    self.resource_id = "haagent_1234"
    self._haa_name = "high_availability_agent"
    self._haa_dashi_name = "dashi_haa_" + uuid4().hex
    self._haa_dashi_uri = get_dashi_uri_from_cfg()
    self._haa_dashi_exchange = "%s.hatests" % bootstrap.get_sys_name()
    # npreserving policy with preserve_n=0: HA agent starts with nothing
    # to maintain; tests presumably raise preserve_n later — confirm.
    self._haa_config = {
        'highavailability': {
            'policy': {
                'interval': 1,
                'name': 'npreserving',
                'parameters': {
                    'preserve_n': 0
                }
            },
            'process_definition_id': self.process_definition_id,
            'dashi_messaging' : True,
            'dashi_exchange' : self._haa_dashi_exchange,
            'dashi_name': self._haa_dashi_name
        },
        'agent': {'resource_id': self.resource_id},
    }

    # Snapshot pre-existing services/processes so tests can diff against them.
    self._base_services, _ = self.container.resource_registry.find_resources(
        restype="Service", name=self.process_definition_name)
    self._base_procs = self.pd_cli.list_processes()

    self.waiter = ProcessStateWaiter()
    self.waiter.start()

    self.container_client = ContainerAgentClient(node=self.container.node,
                                                 name=self.container.name)
    self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
                                                        module="ion.agents.cei.high_availability_agent",
                                                        cls="HighAvailabilityAgent",
                                                        config=self._haa_config)

    # Start a resource agent client to talk with the instrument agent.
    self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id,
                                                      process=FakeProcess())
    log.info('Got haa client %s.', str(self._haa_pyon_client))
    self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)
def setUp(self):
    """Boot the COI services and create an identity management client."""
    # Test actor DN used by the identity tests.
    self.subject = "/DC=org/DC=cilogon/C=US/O=ProtectNetwork/CN=Roger Unwin A254"

    # Start container
    self._start_container()

    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2coi.yml')

    self.identity_management_service = IdentityManagementServiceClient(node=self.container.node)
def setUp(self):
    """Boot r2deploy, create a pubsub client, and spawn a stream process."""
    self._start_container()

    # Establish endpoint with container
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2deploy.yml')

    self.client = PubsubManagementServiceClient(node=self.container.node)

    # Spawn a StreamProcess listening on the 'ctd_data' queue.
    self.container.spawn_process(
        'test_process',
        'pyon.ion.streamproc',
        'StreamProcess',
        config={'process': {'type': 'stream_process', 'listen_name': 'ctd_data'}})
def setUp(self):
    """Boot r2sa and create the SA service clients used by the tests."""
    # Start container
    self._start_container()

    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2sa.yml')

    # Clients for DAMS, resource registry and data products.
    self.client = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
    self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
def _start_agent(self):
    """Spawn the instrument agent and attach a resource agent client."""
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    self._ia_pid = cc.spawn_process(name=IA_NAME,
                                    module=IA_MOD,
                                    cls=IA_CLS,
                                    config=self._agent_config)
    log.info("Started instrument agent pid=%s.", str(self._ia_pid))

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = None
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info("Got instrument agent client %s.", str(self._ia_client))
def setUp(self): # Start container self._start_container() #print 'started container' # Establish endpoint with container container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) #print 'got CC client' container_client.start_rel_from_url('res/deploy/r2sa.yml') print 'started services' # Now create client to DataProcessManagementService self.DPMSclient = DataProcessManagementServiceClient(node=self.container.node) self.RRclient = ResourceRegistryServiceClient(node=self.container.node)
def setUp(self): # Start container #print 'starting container' self._start_container() #print 'started container' setattr(self.container, 'ia_mock_quit', False) # Establish endpoint with container container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) #print 'got CC client' container_client.start_rel_from_url('res/deploy/examples/ia_mock.yml') print 'started services' self.container_client = container_client
def _start_agent(self):
    """Spawn the instrument agent and attach a resource agent client."""
    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    self._ia_pid = cc.spawn_process(name=IA_NAME,
                                    module=IA_MOD,
                                    cls=IA_CLS,
                                    config=self._agent_config)
    log.info('Started instrument agent pid=%s.', str(self._ia_pid))

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = None
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info('Got instrument agent client %s.', str(self._ia_client))
def _start_eeagent(self):
    """Spawn the execution engine agent and build the clients for it."""
    self.container_client = ContainerAgentClient(
        node=self.container.node, name=self.container.name)
    # NOTE(review): replaces self.container with the instance behind the
    # agent client — presumably to get the real container object rather
    # than the test-case wrapper; confirm intent.
    self.container = self.container_client._get_container_instance()

    self._eea_pid = self.container_client.spawn_process(
        name=self._eea_name,
        module="ion.agents.cei.execution_engine_agent",
        cls="ExecutionEngineAgent",
        config=self.agent_config)
    log.info('Agent pid=%s.', str(self._eea_pid))

    # Start a resource agent client to talk with the instrument agent.
    self._eea_pyon_client = SimpleResourceAgentClient(self.resource_id,
                                                      process=FakeProcess())
    log.info('Got eea client %s.', str(self._eea_pyon_client))
    self.eea_client = ExecutionEngineAgentClient(self._eea_pyon_client)
def my_test_init(self): # Start container #print 'instantiating container' self._start_container() #container = Container() #print 'starting container' #container.start() #print 'started container' # Establish endpoint with container container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) #container_client = ProcessRPCClient(node=container.node, name=container.name, iface=IContainerAgent, process=FakeProcess()) #print 'got CC client' container_client.start_rel_from_url('res/deploy/r2ims.yml') print 'started services'
def setUp(self):
    """Boot r2deploy and build the stream/subscription/process fixtures."""
    # set up the container
    self._start_container()
    self.cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    self.cc.start_rel_from_url('res/deploy/r2deploy.yml')

    # Service clients.
    self.pubsub_cli = PubsubManagementServiceClient(node=self.cc.node)
    self.tms_cli = TransformManagementServiceClient(node=self.cc.node)
    self.rr_cli = ResourceRegistryServiceClient(node=self.cc.node)

    # Input stream the transform will consume.
    self.input_stream = IonObject(RT.Stream, name='ctd1 output', description='output from a ctd')
    self.input_stream.original = True
    self.input_stream.mimetype = 'hdf'
    self.input_stream_id = self.pubsub_cli.create_stream(self.input_stream)

    # Subscription feeding the transform's exchange queue.
    self.input_subscription = IonObject(RT.Subscription, name='ctd1 subscription', description='subscribe to this if you want ctd1 data')
    self.input_subscription.query['stream_id'] = self.input_stream_id
    self.input_subscription.exchange_name = 'a queue'
    self.input_subscription_id = self.pubsub_cli.create_subscription(self.input_subscription)

    # Output stream the transform publishes to.
    self.output_stream = IonObject(RT.Stream, name='transform output', description='output from the transform process')
    self.output_stream.original = True
    self.output_stream.mimetype = 'raw'
    self.output_stream_id = self.pubsub_cli.create_stream(self.output_stream)

    # Definition of the transform process itself, registered in the RR.
    self.process_definition = IonObject(RT.ProcessDefinition, name='transform_process')
    self.process_definition.executable = {'module': 'ion.services.dm.transformation.example.transform_example',
                                          'class': 'TransformExample'}
    self.process_definition_id, _ = self.rr_cli.create(self.process_definition)
def setUp(self):
    """Boot r2cei, start the PD app under a dashi config, and one EE agent."""
    self.dashi = None
    self._start_container()
    self.container_client = ContainerAgentClient(node=self.container.node,
                                                 name=self.container.name)
    # NOTE(review): replaces self.container with the instance behind the
    # client — presumably the real container object; confirm.
    self.container = self.container_client._get_container_instance()

    # Start the ProcessDispatcher app directly with the dashi-enabled config.
    app = dict(name="process_dispatcher",
               processapp=("process_dispatcher",
                           "ion.services.cei.process_dispatcher_service",
                           "ProcessDispatcherService"))
    self.container.start_app(app, config=pd_config)

    self.rr_cli = self.container.resource_registry
    self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

    # Process definition used by the dispatch tests.
    self.process_definition = ProcessDefinition(name='test_process')
    self.process_definition.executable = {'module': 'ion.services.cei.test.test_process_dispatcher',
                                          'class': 'TestProcess'}
    self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)

    self._eea_pids = []
    self._tmpdirs = []

    self.dashi = get_dashi(uuid.uuid4().hex,
                           pd_config['processdispatcher']['dashi_uri'],
                           pd_config['processdispatcher']['dashi_exchange'])

    # send a fake node_state message to PD's dashi binding.
    self.node1_id = uuid.uuid4().hex
    self._send_node_state("engine1", self.node1_id)
    self._start_eeagent(self.node1_id)

    # NOTE(review): waiter is created but not started here (other setups
    # call .start()) — confirm whether tests start it themselves.
    self.waiter = ProcessStateWaiter()
def setUp(self):
    """Start a fake dashi-based PD, then boot a container bridged to it."""
    # set up a fake dashi consumer to act as the PD
    try:
        import dashi
    except ImportError:
        raise SkipTest("Process Dispatcher Bridge integration test " +
                       "requires the dashi library. Skipping.")
    self.fake_pd = FakePD(dashi.DashiConnection(self.dashi_pd_topic,
                                                self.dashi_uri,
                                                self.dashi_exchange))
    self.fake_pd.consume_in_thread()

    # set up the container
    self._start_container()
    self.cc = ContainerAgentClient(node=self.container.node, name=self.container.name)

    # NOTE(review): mutates global CFG *before* starting the rel so the
    # bridge picks up this config — confirm it is reset between tests.
    CFG['process_dispatcher_bridge'] = dict(uri="memory://local",
                                            exchange="test_pd_bridge_exchange",
                                            topic="processdispatcher")
    self.cc.start_rel_from_url('res/deploy/r2cei.yml')

    self.pd_cli = ProcessDispatcherServiceClient(node=self.cc.node)

    # Process definition used by the bridge tests.
    self.process_definition = ProcessDefinition(name='basic_transform_definition')
    self.process_definition.executable = {'module': 'ion.processes.data.transforms.transform_example',
                                          'class': 'TransformExample'}
    self.process_definition_id = self.pd_cli.create_process_definition(self.process_definition)
def setUp(self):
    """Boot COI, stop the gateway's web server, and wrap the WSGI app."""
    # Start container
    self._start_container()

    cc = ContainerAgentClient(node=self.container.node, name=self.container.name)
    cc.start_rel_from_url('res/deploy/r2coi.yml')

    self.service_gateway_service = ServiceGatewayServiceClient(node=self.container.node)

    log.debug('stopping Gateway web server')
    # The externally-bound web server is not needed: the tests drive the
    # WSGI app in-process through TestApp below.
    self.service_gateway_service.stop_service()

    self.test_app = TestApp(app)
def setUp(self):
    """Boot r2cei, register a unique process + service definition, then
    spawn the HA agent (with cleanup) and its client."""
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2cei.yml')

    #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
    self.pd_cli = ProcessDispatcherServiceClient(
        to_name="process_dispatcher")

    # Unique per-run definition name avoids collisions across test runs.
    self.process_definition_id = uuid4().hex
    self.process_definition_name = 'test_haagent_%s' % self.process_definition_id
    self.process_definition = ProcessDefinition(
        name=self.process_definition_name,
        executable={
            'module': 'ion.agents.cei.test.test_haagent',
            'class': 'TestProcess'
        })
    self.pd_cli.create_process_definition(self.process_definition,
                                          self.process_definition_id)

    # Matching ServiceDefinition resource for the process definition.
    service_definition = SERVICE_DEFINITION_TMPL % self.process_definition_name
    sd = IonObject(RT.ServiceDefinition, {
        "name": self.process_definition_name,
        "definition": service_definition
    })
    self.service_def_id, _ = self.container.resource_registry.create(sd)

    self.resource_id = "haagent_1234"
    self._haa_name = "high_availability_agent"
    self._haa_dashi_name = "dashi_haa_" + uuid4().hex
    self._haa_dashi_uri = get_dashi_uri_from_cfg()
    self._haa_dashi_exchange = "hatests"
    self._haa_config = self._get_haagent_config()

    # Snapshot pre-existing services/processes so tests can diff against them.
    self._base_services, _ = self.container.resource_registry.find_resources(
        restype="Service", name=self.process_definition_name)
    self._base_procs = self.pd_cli.list_processes()

    self.waiter = ProcessStateWaiter()
    self.waiter.start()

    self.container_client = ContainerAgentClient(node=self.container.node,
                                                 name=self.container.name)
    self._spawn_haagent()
    self.addCleanup(self._stop_haagent)

    self._setup_haa_client()
def setUp(self):
    """Boot r2deploy, build service clients and an event subscriber, then
    create and start an external dataset agent instance + client."""
    # Start container
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    # Service clients used by the test.
    self.client = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
    self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
    self.dams_client = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.pubsub_client = PubsubManagementServiceClient(node=self.container.node)
    self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
    self.data_retriever = DataRetrieverServiceClient(node=self.container.node)

    self._container_client = ContainerAgentClient(node=self.container.node,
                                                  name=self.container.name)

    # Data async and subscription TODO: Replace with new subscriber
    self._finished_count = None
    #TODO: Switch to gevent.queue.Queue
    self._async_finished_result = AsyncResult()
    self._finished_events_received = []
    self._finished_event_subscriber = None
    self._start_finished_event_subscriber()
    self.addCleanup(self._stop_finished_event_subscriber)

    # Driver config for the Slocum data handler.
    self.DVR_CONFIG = {}
    self.DVR_CONFIG = {
        'dvr_mod' : 'ion.agents.data.handlers.slocum_data_handler',
        'dvr_cls' : 'SlocumDataHandler',
    }

    # NOTE(review): _setup_resources() presumably defines self.EDA_RESOURCE_ID,
    # self.EDA_MOD, self.EDA_CLS and self.datasetagent_id used below — confirm.
    self._setup_resources()

    self.agent_config = {
        'driver_config' : self.DVR_CONFIG,
        'stream_config' : {},
        'agent' : {'resource_id': self.EDA_RESOURCE_ID},
        'test_mode' : True
    }

    # Register the external dataset agent instance with DAMS.
    datasetagent_instance_obj = IonObject(RT.ExternalDatasetAgentInstance,
                                          name='ExternalDatasetAgentInstance1',
                                          description='external data agent instance',
                                          handler_module=self.EDA_MOD,
                                          handler_class=self.EDA_CLS,
                                          dataset_driver_config=self.DVR_CONFIG,
                                          dataset_agent_config=self.agent_config)
    self.dataset_agent_instance_id = self.dams_client.create_external_dataset_agent_instance(
        external_dataset_agent_instance=datasetagent_instance_obj,
        external_dataset_agent_id=self.datasetagent_id,
        external_dataset_id=self.EDA_RESOURCE_ID)

    #TG: Setup/configure the granule logger to log granules as they're published

    # Start the agent instance and read it back for inspection.
    pid = self.dams_client.start_external_dataset_agent_instance(self.dataset_agent_instance_id)
    dataset_agent_instance_obj = self.dams_client.read_external_dataset_agent_instance(self.dataset_agent_instance_id)
    print 'TestBulkIngest: Dataset agent instance obj: = ', dataset_agent_instance_obj

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = ResourceAgentClient('datasetagentclient', name=pid, process=FakeProcess())
    log.debug(" test_createTransformsThenActivateInstrument:: got ia client %s", str(self._ia_client))
def _start_agent(self, bootmode=None):
    """Spawn the instrument agent (optionally with a restart bootmode)."""
    cc = ContainerAgentClient(node=self.container.node,
                              name=self.container.name)

    # Copy so the stored agent config is never mutated.
    spawn_config = deepcopy(self._agent_config)
    spawn_config['bootmode'] = bootmode

    # Re-use the previous pid so a restart keeps the same process identity.
    self._ia_pid = cc.spawn_process(name=IA_NAME,
                                    module=IA_MOD,
                                    cls=IA_CLS,
                                    config=spawn_config,
                                    process_id=self._ia_pid)

    # Resource agent client for talking to the instrument agent.
    self._ia_client = None
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info('Got instrument agent client %s.', str(self._ia_client))
def setUp(self): """ Initialize test members. """ #log.warn('Starting the container') # Start container. self._start_container() # Bring up services in a deploy file #log.warn('Starting the rel') self.container.start_rel_from_url('res/deploy/r2deploy.yml') # Create a pubsub client to create streams. # log.warn('Init a pubsub client') self._pubsub_client = PubsubManagementServiceClient(node=self.container.node) # log.warn('Init a ContainerAgentClient') self._container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) # Data async and subscription TODO: Replace with new subscriber self._finished_count = None #TODO: Switch to gevent.queue.Queue self._async_finished_result = AsyncResult() self._finished_events_received = [] self._finished_event_subscriber = None self._start_finished_event_subscriber() self.addCleanup(self._stop_finished_event_subscriber) # TODO: Finish dealing with the resources and whatnot # TODO: DVR_CONFIG and (potentially) stream_config could both be reconfigured in self._setup_resources() self._setup_resources() #TG: Setup/configure the granule logger to log granules as they're published # Create agent config. agent_config = { 'driver_config': self.DVR_CONFIG, 'stream_config': {}, 'agent': {'resource_id': self.EDA_RESOURCE_ID}, 'test_mode': True } # Start instrument agent. self._ia_pid = None log.debug('TestInstrumentAgent.setup(): starting EDA.') self._ia_pid = self._container_client.spawn_process( name=self.EDA_NAME, module=self.EDA_MOD, cls=self.EDA_CLS, config=agent_config ) log.info('Agent pid=%s.', str(self._ia_pid)) # Start a resource agent client to talk with the instrument agent. self._ia_client = None self._ia_client = ResourceAgentClient(self.EDA_RESOURCE_ID, process=FakeProcess()) log.info('Got ia client %s.', str(self._ia_client))
def main():
    """
    Command-line entry point: read connection parameters from a container
    pidfile, connect to that container's agent over messaging, invoke the
    requested IContainerAgent operation, and print the result.

    Exits with status 2 when a 'status' query reports anything other
    than RUNNING.
    """
    parser = argparse.ArgumentParser(description="ScionCC Control")
    parser.add_argument("pidfile", help="pidfile to use. If not specified, uses the first one found.")
    parser.add_argument("command", help="command to send to the container agent", choices=IContainerAgent.names())
    parser.add_argument("commandargs", metavar="arg", nargs="*", help="arguments to the command being sent")
    opts = parser.parse_args()

    pidfile = opts.pidfile
    # NOTE(review): argparse already requires this positional, so this guard
    # looks unreachable — kept for safety.
    if not pidfile:
        raise Exception("No pidfile specified")

    # The pidfile is a msgpack blob holding messaging params and agent names.
    parms = {}
    with open(pidfile, 'r') as pf:
        parms = msgpack.loads(pf.read())

    assert parms, "No content in pidfile"

    bootstrap_pyon()
    node, ioloop = make_node(parms['messaging'])
    node.setup_interceptors(CFG.interceptor)

    # Address the container agent directly via (exchange, queue) from the pidfile.
    cc = ContainerAgentClient(node=node, to_name=(parms['container-xp'], parms['container-agent']))

    # make a manual call - this is to avoid having to have the IonObject for the call
    methdefs = [x[1] for x in IContainerAgent.namesAndDescriptions() if x[0] == opts.command]
    assert len(methdefs) == 1

    arg_names = methdefs[0].positional      # ('name', 'module', 'cls', 'config')
    # ('name', <usrinp1>, 'cls', <usrinp2>) -> { 'name' : <usrinp1>, 'cls': <usrinp2> }
    msg_args = dict(zip(arg_names, opts.commandargs))

    retval = cc.request(msg_args, op=opts.command)

    # special case: status
    if opts.command == "status":
        statstr = retval
        print "Status:", statstr
        if statstr != "RUNNING":
            node.client.close()
            sys.exit(2)
    else:
        print "Returned", retval

    node.client.close()
def _start_agent(self):
    """
    Start an instrument agent and client: build the driver test support,
    launch the port agent, assemble the agent config, spawn the instrument
    agent in the container and connect a resource agent client to it.
    """
    # Announce the driver/test-support parameters before constructing it.
    log.info("Creating driver integration test support:")
    for fmt, value in (("driver module: %s", DRV_MOD),
                       ("driver class: %s", DRV_CLS),
                       ("device address: %s", DEV_ADDR),
                       ("device port: %s", DEV_PORT),
                       ("log delimiter: %s", DELIM),
                       ("work dir: %s", WORK_DIR)):
        log.info(fmt, value)

    self._support = DriverIntegrationTestSupport(
        DRV_MOD, DRV_CLS, DEV_ADDR, DEV_PORT, DATA_PORT, CMD_PORT,
        PA_BINARY, DELIM, WORK_DIR)

    # Start port agent, add stop to cleanup.
    pagent_port = self._support.start_pagent()
    log.info("Port agent started at port %i", pagent_port)

    # Configure driver to use port agent port number.
    DVR_CONFIG["comms_config"] = {"addr": "localhost", "port": pagent_port}
    self.addCleanup(self._support.stop_pagent)

    # Assemble the agent configuration.
    ia_config = {
        "driver_config": DVR_CONFIG,
        "stream_config": {},
        "agent": {"resource_id": IA_RESOURCE_ID},
        "test_mode": True,
    }

    # Spawn the instrument agent process inside the container.
    log.debug("Starting IA.")
    cc_client = ContainerAgentClient(node=self.container.node,
                                     name=self.container.name)
    pid = cc_client.spawn_process(name=IA_NAME, module=IA_MOD,
                                  cls=IA_CLS, config=ia_config)
    log.info("Agent pid=%s.", str(pid))

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
    log.info("Got ia client %s.", str(self._ia_client))
def test_bank(self):
    """
    End-to-end exercise of the bank example service: deploy it in a
    container, then open savings and checking accounts, deposit,
    withdraw and buy bonds, and assert both accounts are listed.
    """
    # Start container
    self._start_container()

    # Establish endpoint with container
    container_client = ContainerAgentClient(node=self.container.node,
                                            name=self.container.name)
    container_client.start_rel_from_url('res/deploy/examples/bank_complete.yml')

    # Now create client to bank service
    client = BankServiceClient(node=self.container.node)

    # Send some requests
    print 'Creating savings account'
    savingsAcctNum = client.new_account('kurt', 'Savings')
    print "New savings account number: " + str(savingsAcctNum)
    print "Starting savings balance %s" % str(
        client.get_balances(savingsAcctNum))
    client.deposit(savingsAcctNum, 99999999)
    print "Savings balance after deposit %s" % str(
        client.get_balances(savingsAcctNum))
    client.withdraw(savingsAcctNum, 1000)
    print "Savings balance after withdrawl %s" % str(
        client.get_balances(savingsAcctNum))
    print "Buying 1000 savings bonds"
    client.buy_bonds(savingsAcctNum, 1000)
    print "Savings balance after bond purchase %s" % str(
        client.get_balances(savingsAcctNum))

    # Repeat the deposit/withdraw cycle on a checking account.
    checkingAcctNum = client.new_account('kurt', 'Checking')
    print "New checking account number: " + str(checkingAcctNum)
    print "Starting checking balance %s" % str(
        client.get_balances(checkingAcctNum))
    client.deposit(checkingAcctNum, 99999999)
    print "Confirming checking balance after deposit %s" % str(
        client.get_balances(checkingAcctNum))
    client.withdraw(checkingAcctNum, 1000)
    print "Confirming checking balance after withdrawl %s" % str(
        client.get_balances(checkingAcctNum))

    # Both accounts should now be registered for the user.
    acctList = client.list_accounts('kurt')
    self.assertTrue(len(acctList) == 2)
def setUp(self):
    """
    Start a container, launch the Process Dispatcher service inside it,
    register a test process definition, connect a dashi client, and prime
    the PD with one fake node-state message plus a first EE agent.
    """
    self.dashi = None
    self._start_container()
    from pyon.public import CFG

    # Reach the container through its agent, then grab the instance directly.
    self.container_client = ContainerAgentClient(node=self.container.node,
                                                 name=self.container.name)
    self.container = self.container_client._get_container_instance()

    # Launch the Process Dispatcher service as an app in this container.
    pd_app = {
        "name": "process_dispatcher",
        "processapp": ("process_dispatcher",
                       "ion.services.cei.process_dispatcher_service",
                       "ProcessDispatcherService"),
    }
    self.container.start_app(pd_app, config=pd_config)

    self.rr_cli = self.container.resource_registry
    self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

    # Register the process definition the tests will launch.
    self.process_definition = ProcessDefinition(name='test_process')
    self.process_definition.executable = {
        'module': 'ion.services.cei.test.test_process_dispatcher',
        'class': 'TestProcess'}
    self.process_definition_id = self.pd_cli.create_process_definition(
        self.process_definition)

    # Book-keeping for EE agents started during the tests.
    self._eea_pids = []
    self._eea_pid_to_resource_id = {}
    self._eea_pid_to_persistence_dir = {}
    self._tmpdirs = []

    self.dashi = get_dashi(
        uuid.uuid4().hex,
        pd_config['processdispatcher']['dashi_uri'],
        pd_config['processdispatcher']['dashi_exchange'],
        sysname=CFG.get_safe("dashi.sysname"))

    # send a fake node_state message to PD's dashi binding.
    self.node1_id = uuid.uuid4().hex
    self._send_node_state("engine1", self.node1_id)
    self._initial_eea_pid = self._start_eeagent(self.node1_id)

    self.waiter = ProcessStateWaiter()
def setUp(self):
    """
    Setup the test environment to exercise use of instrument agent, including:
    * define driver_config parameters.
    * create container with required services and container client.
    * create publication stream ids for each driver data stream.
    * create stream_config parameters.
    * create and activate subscriptions for agent data streams.
    * spawn instrument agent process and create agent client.
    * add cleanup functions to cause subscribers to get stopped.
    """
    # params = { ('CTD', 'TA2'): -1.9434316e-05,
    # ('CTD', 'PTCA1'): 1.3206866,
    # ('CTD', 'TCALDATE'): [8, 11, 2006] }

    # for tup in params:
    # print tup

    self.addCleanup(self.customCleanUp)

    # Names of agent data streams to be configured.
    parsed_stream_name = 'ctd_parsed'
    raw_stream_name = 'ctd_raw'

    # Driver configuration.
    #Simulator
    self.driver_config = {
        'svr_addr': 'localhost',
        'cmd_port': 5556,
        'evt_port': 5557,
        'dvr_mod': 'ion.agents.instrument.drivers.sbe37.sbe37_driver',
        'dvr_cls': 'SBE37Driver',
        'comms_config': {
            SBE37Channel.CTD: {
                'method': 'ethernet',
                'device_addr': CFG.device.sbe37.host,
                'device_port': CFG.device.sbe37.port,
                'server_addr': 'localhost',
                'server_port': 8888
            }
        }
    }

    #Hardware
    '''
    self.driver_config = {
        'svr_addr': 'localhost',
        'cmd_port': 5556,
        'evt_port': 5557,
        'dvr_mod': 'ion.agents.instrument.drivers.sbe37.sbe37_driver',
        'dvr_cls': 'SBE37Driver',
        'comms_config': {
            SBE37Channel.CTD: {
                'method':'ethernet',
                'device_addr': '137.110.112.119',
                'device_port': 4001,
                'server_addr': 'localhost',
                'server_port': 8888
            }
        }
    }
    '''

    # Start container.
    self._start_container()

    # Establish endpoint with container (used in tests below)
    self._container_client = ContainerAgentClient(node=self.container.node,
                                                  name=self.container.name)

    # Bring up services in a deploy file (no need to message)
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    # Create a pubsub client to create streams.
    self._pubsub_client = PubsubManagementServiceClient(
        node=self.container.node)

    # A callback for processing subscribed-to data.
    def consume(message, headers):
        log.info('Subscriber received message: %s', str(message))

    # Create a stream subscriber registrar to create subscribers.
    subscriber_registrar = StreamSubscriberRegistrar(
        process=self.container, node=self.container.node)

    self.subs = []

    # Create streams for each stream named in driver.
    # For each driver packet stream: define it, create it, remember its id,
    # and wire up + activate a subscription that feeds consume().
    self.stream_config = {}
    for (stream_name, val) in PACKET_CONFIG.iteritems():
        stream_def = ctd_stream_definition(stream_id=None)
        stream_def_id = self._pubsub_client.create_stream_definition(
            container=stream_def)
        stream_id = self._pubsub_client.create_stream(
            name=stream_name,
            stream_definition_id=stream_def_id,
            original=True,
            encoding='ION R2')
        self.stream_config[stream_name] = stream_id

        # Create subscriptions for each stream.
        exchange_name = '%s_queue' % stream_name
        sub = subscriber_registrar.create_subscriber(
            exchange_name=exchange_name, callback=consume)
        sub.start()
        query = StreamQuery(stream_ids=[stream_id])
        sub_id = self._pubsub_client.create_subscription(
            query=query, exchange_name=exchange_name)
        self._pubsub_client.activate_subscription(sub_id)
        self.subs.append(sub)

    # Add cleanup function to stop subscribers.
    def stop_subscriber(sub_list):
        for sub in sub_list:
            sub.stop()
    self.addCleanup(stop_subscriber, self.subs)

    # Create agent config.
    self.agent_resource_id = '123xyz'
    self.agent_config = {
        'driver_config': self.driver_config,
        'stream_config': self.stream_config,
        'agent': {
            'resource_id': self.agent_resource_id
        }
    }

    # Launch an instrument agent process.
    self._ia_name = 'agent007'
    self._ia_mod = 'ion.agents.instrument.instrument_agent'
    self._ia_class = 'InstrumentAgent'
    self._ia_pid = self._container_client.spawn_process(
        name=self._ia_name,
        module=self._ia_mod,
        cls=self._ia_class,
        config=self.agent_config)
    log.info('got pid=%s', str(self._ia_pid))

    self._ia_client = None
    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = ResourceAgentClient(self.agent_resource_id,
                                          process=FakeProcess())
    log.info('got ia client %s', str(self._ia_client))
def setUp(self):
    """
    Start a container with CEI services, register a test process
    definition with the Process Dispatcher, start a fake metrics
    webserver, then spawn the High Availability agent (sensor policy)
    and create clients for talking to it.
    """
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2cei.yml')
    self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

    # Register the process definition the HA agent will replicate.
    self.process_definition_id = uuid4().hex
    self.process_definition = ProcessDefinition(name='test',
        executable={
            'module': 'ion.agents.cei.test.test_haagent',
            'class': 'TestProcess'
        })
    self.pd_cli.create_process_definition(self.process_definition,
        self.process_definition_id)

    # Fake trafficsentinel endpoint; may end up on a different port.
    http_port = 8919
    http_port = self._start_webserver(port=http_port)

    self.resource_id = "haagent_4567"
    self._haa_name = "high_availability_agent"
    # Sensor policy: scale on the 'app_attributes:ml' metric between 1
    # and 5 processes, polling every second.
    self._haa_config = {
        'highavailability': {
            'policy': {
                'interval': 1,
                'name': 'sensor',
                'parameters': {
                    'metric': 'app_attributes:ml',
                    'sample_period': 600,
                    'sample_function': 'Average',
                    'cooldown_period': 20,
                    'scale_up_threshold': 2.0,
                    'scale_up_n_processes': 1,
                    'scale_down_threshold': 1.0,
                    'scale_down_n_processes': 1,
                    'maximum_processes': 5,
                    'minimum_processes': 1,
                }
            },
            'aggregator': {
                'type': 'trafficsentinel',
                'host': 'localhost',
                'port': http_port,
                'protocol': 'http',
                'username': '******',
                'password': '******'
            },
            'process_definition_id': self.process_definition_id,
            "process_dispatchers": [
                'process_dispatcher'
            ]
        },
        'agent': {'resource_id': self.resource_id},
    }

    # Snapshot of processes that exist before the HA agent starts.
    self._base_procs = self.pd_cli.list_processes()

    self.waiter = ProcessStateWaiter()
    self.waiter.start()

    self.container_client = ContainerAgentClient(node=self.container.node,
        name=self.container.name)
    self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
        module="ion.agents.cei.high_availability_agent",
        cls="HighAvailabilityAgent", config=self._haa_config)

    # Start a resource agent client to talk with the instrument agent.
    self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id,
        process=FakeProcess())
    log.info('Got haa client %s.', str(self._haa_pyon_client))

    self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)
class HeartbeaterIntTest(IonIntegrationTestCase):
    """
    Integration test for the execution engine agent's heartbeater:
    verifies a heartbeat is only sent once the agent's listener can
    actually answer a dump_state request.
    """

    @needs_eeagent
    def setUp(self):
        # Start a bare container; the EE agent itself is started lazily by
        # _start_eeagent so the test can subscribe to heartbeats first.
        self._start_container()

        self.resource_id = "eeagent_123456789"
        self._eea_name = "eeagent"

        self.persistence_directory = tempfile.mkdtemp()

        # Very fast heartbeat (0.01s) to provoke the race under test.
        self.agent_config = {
            'eeagent': {
                'heartbeat': "0.01",
                'slots': 100,
                'name': 'pyon_eeagent',
                'launch_type': {
                    'name': 'pyon',
                    'persistence_directory': self.persistence_directory,
                }
            },
            'agent': {'resource_id': self.resource_id},
            'logging': {
                'loggers': {
                    'eeagent': {
                        'level': 'DEBUG',
                        'handlers': ['console']
                    }
                },
                'root': {
                    'handlers': ['console']
                },
            }
        }

    def _start_eeagent(self):
        # Spawn the EE agent in the container and build a client for it.
        self.container_client = ContainerAgentClient(
            node=self.container.node, name=self.container.name)
        self.container = self.container_client._get_container_instance()

        self._eea_pid = self.container_client.spawn_process(
            name=self._eea_name,
            module="ion.agents.cei.execution_engine_agent",
            cls="ExecutionEngineAgent", config=self.agent_config)
        log.info('Agent pid=%s.', str(self._eea_pid))

        # Start a resource agent client to talk with the instrument agent.
        self._eea_pyon_client = SimpleResourceAgentClient(self.resource_id,
            process=FakeProcess())
        log.info('Got eea client %s.', str(self._eea_pyon_client))
        self.eea_client = ExecutionEngineAgentClient(self._eea_pyon_client)

    def tearDown(self):
        self.container.terminate_process(self._eea_pid)
        shutil.rmtree(self.persistence_directory)

    @needs_eeagent
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
        'Skip test while in CEI LAUNCH mode')
    def test_heartbeater(self):
        """test_heartbeater
        Test whether the eeagent waits until the eeagent listener is ready
        before sending a heartbeat to the PD
        """

        # beat_died is a list because of a hack to get around a limitation in python 2.7
        # See: http://stackoverflow.com/questions/8934772/local-var-referenced-before-assignment
        beat_died = [False]

        def heartbeat_callback(heartbeat, headers):
            # On every heartbeat, immediately try to contact the agent that
            # sent it; a failure means the heartbeat arrived too early.
            eeagent_id = heartbeat['eeagent_id']
            agent_client = SimpleResourceAgentClient(eeagent_id,
                name=eeagent_id, process=FakeProcess())
            ee_client = ExecutionEngineAgentClient(agent_client, timeout=2)
            try:
                ee_client.dump_state()
            except:
                log.exception("Heartbeat Failed!")
                beat_died[0] = True

        # Subscribe to heartbeats BEFORE the agent starts, then watch for
        # any failed callback over ~2.5 seconds.
        self.beat_subscriber = HeartbeatSubscriber("heartbeat_queue",
            callback=heartbeat_callback, node=self.container.node)
        self.beat_subscriber.start()
        try:
            self._start_eeagent()
            for i in range(0, 5):
                if beat_died[0] is True:
                    assert False, "A Hearbeat callback wasn't able to contact the eeagent"
                gevent.sleep(0.5)
        finally:
            self.beat_subscriber.stop()
class ExecutionEngineAgentSupdIntTest(IonIntegrationTestCase):
    """
    Integration test for the execution engine agent using the 'supd'
    (supervisord) launch type: launches and terminates simple OS
    processes and polls the agent for their state.
    """

    @needs_eeagent
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.resource_id = "eeagent_1234"
        self._eea_name = "eeagent"

        self.supd_directory = tempfile.mkdtemp()

        # supd launch type: processes are run through bin/supervisord.
        self.agent_config = {
            'eeagent': {
                'heartbeat': 0,
                'slots': 100,
                'name': 'pyon_eeagent',
                'launch_type': {
                    'name': 'supd',
                    'pyon_directory': os.getcwd(),
                    'supd_directory': self.supd_directory,
                    'supdexe': 'bin/supervisord'
                },
            },
            'agent': {'resource_id': self.resource_id},
            'logging': {
                'loggers': {
                    'eeagent': {
                        'level': 'DEBUG',
                        'handlers': ['console']
                    }
                },
                'root': {
                    'handlers': ['console']
                }
            }
        }

        # Start eeagent.
        self._eea_pid = None

        self.container_client = ContainerAgentClient(
            node=self.container.node, name=self.container.name)
        self._eea_pid = self.container_client.spawn_process(
            name=self._eea_name,
            module="ion.agents.cei.execution_engine_agent",
            cls="ExecutionEngineAgent", config=self.agent_config)
        log.info('Agent pid=%s.', str(self._eea_pid))

        # Start a resource agent client to talk with the instrument agent.
        self._eea_pyon_client = SimpleResourceAgentClient(self.resource_id,
            process=FakeProcess())
        log.info('Got eea client %s.', str(self._eea_pyon_client))
        self.eea_client = ExecutionEngineAgentClient(self._eea_pyon_client)

    def tearDown(self):
        self.container.terminate_process(self._eea_pid)
        shutil.rmtree(self.supd_directory)

    def wait_for_state(self, upid, desired_state, timeout=5):
        # Poll the agent's dump_state up to `timeout` times (1s apart)
        # until the process reaches desired_state, else fail the test.
        attempts = 0
        while timeout > attempts:
            state = self.eea_client.dump_state().result
            proc = get_proc_for_upid(state, upid)
            if proc.get('state') == desired_state:
                return
            gevent.sleep(1)
            attempts += 1
        assert False, "Process %s took too long to get to %s" % (upid, desired_state)

    @needs_eeagent
    def test_basics(self):
        # 'true' exits immediately -> EXITED.
        true_u_pid = "test0"
        round = 0
        run_type = "supd"
        true_parameters = {'exec': 'true', 'argv': []}
        self.eea_client.launch_process(true_u_pid, round, run_type,
            true_parameters)
        self.wait_for_state(true_u_pid, [800, 'EXITED'])

        # 'cat' blocks on stdin -> RUNNING until terminated.
        cat_u_pid = "test1"
        round = 0
        run_type = "supd"
        cat_parameters = {'exec': 'cat', 'argv': []}
        self.eea_client.launch_process(cat_u_pid, round, run_type,
            cat_parameters)
        self.wait_for_state(cat_u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(cat_u_pid, round)
        self.wait_for_state(cat_u_pid, [700, 'TERMINATED'])
class HighAvailabilityAgentSensorPolicyTest(IonIntegrationTestCase): def _start_webserver(self, port=None): """ Start a webserver for testing code download Note: tries really hard to get a port, and if it can't use the suggested port, randomly picks another, and returns it """ def log_message(self, format, *args): #swallow log massages pass class TestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): server_version = 'test_server' extensions_map = '' def do_GET(self): self.send_response(200) self.send_header("Content-type", "text/plain") self.send_header("Content-Length", len(self.server.response)) self.end_headers() self.wfile.write(self.server.response) class Server(HTTPServer): response = '' def serve_forever(self): self._serving = 1 while self._serving: self.handle_request() def stop(self): self._serving = 0 if port is None: port = 8008 Handler = TestRequestHandler Handler.log_message = log_message for i in range(0, 100): try: self._webserver = Server(("localhost", port), Handler) except socket.error: print "port %s is in use, picking another" % port port = randint(8000, 10000) continue else: break self._web_glet = gevent.spawn(self._webserver.serve_forever) return port def _stop_webserver(self): if self._webserver is not None: self._webserver.stop() gevent.sleep(2) self._web_glet.kill() @needs_epu def setUp(self): self._start_container() self.container.start_rel_from_url('res/deploy/r2cei.yml') self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher") self.process_definition_id = uuid4().hex self.process_definition = ProcessDefinition(name='test', executable={ 'module': 'ion.agents.cei.test.test_haagent', 'class': 'TestProcess' }) self.pd_cli.create_process_definition(self.process_definition, self.process_definition_id) http_port = 8919 http_port = self._start_webserver(port=http_port) self.resource_id = "haagent_4567" self._haa_name = "high_availability_agent" self._haa_config = { 'highavailability': { 'policy': { 'interval': 1, 'name': 
'sensor', 'parameters': { 'metric': 'app_attributes:ml', 'sample_period': 600, 'sample_function': 'Average', 'cooldown_period': 20, 'scale_up_threshold': 2.0, 'scale_up_n_processes': 1, 'scale_down_threshold': 1.0, 'scale_down_n_processes': 1, 'maximum_processes': 5, 'minimum_processes': 1, } }, 'aggregator': { 'type': 'trafficsentinel', 'host': 'localhost', 'port': http_port, 'protocol': 'http', 'username': '******', 'password': '******' }, 'process_definition_id': self.process_definition_id, "process_dispatchers": [ 'process_dispatcher' ] }, 'agent': {'resource_id': self.resource_id}, } self._base_procs = self.pd_cli.list_processes() self.waiter = ProcessStateWaiter() self.waiter.start() self.container_client = ContainerAgentClient(node=self.container.node, name=self.container.name) self._haa_pid = self.container_client.spawn_process(name=self._haa_name, module="ion.agents.cei.high_availability_agent", cls="HighAvailabilityAgent", config=self._haa_config) # Start a resource agent client to talk with the instrument agent. 
self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess()) log.info('Got haa client %s.', str(self._haa_pyon_client)) self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client) def tearDown(self): self.waiter.stop() self.container.terminate_process(self._haa_pid) self._stop_webserver() self._stop_container() def get_running_procs(self): """returns a normalized set of running procs (removes the ones that were there at setup time) """ base = self._base_procs base_pids = [proc.process_id for proc in base] current = self.pd_cli.list_processes() current_pids = [proc.process_id for proc in current] print "filtering base procs %s from %s" % (base_pids, current_pids) normal = [cproc for cproc in current if cproc.process_id not in base_pids and cproc.process_state == ProcessStateEnum.RUNNING] return normal def _get_managed_upids(self): result = self.haa_client.dump().result upids = result['managed_upids'] return upids def _set_response(self, response): self._webserver.response = response def test_sensor_policy(self): status = self.haa_client.status().result # Ensure HA hasn't already failed assert status in ('PENDING', 'READY', 'STEADY') self.waiter.await_state_event(state=ProcessStateEnum.RUNNING) self.assertEqual(len(self.get_running_procs()), 1) for i in range(0, 5): status = self.haa_client.status().result try: self.assertEqual(status, 'STEADY') break except: gevent.sleep(1) else: assert False, "HA Service took too long to get to state STEADY" # Set ml for each proc such that we scale up upids = self._get_managed_upids() response = "" for upid in upids: response += "%s,ml=5\n" self._set_response(response) self.waiter.await_state_event(state=ProcessStateEnum.RUNNING) self.assertEqual(len(self.get_running_procs()), 2) # Set ml so we stay steady upids = self._get_managed_upids() response = "" for upid in upids: response += "%s,ml=1.5\n" self._set_response(response) self.assertEqual(len(self.get_running_procs()), 2) for i in 
range(0, 5): status = self.haa_client.status().result try: self.assertEqual(status, 'STEADY') break except: gevent.sleep(1) else: assert False, "HA Service took too long to get to state STEADY" # Set ml so we scale down upids = self._get_managed_upids() response = "" for upid in upids: response += "%s,ml=0.5\n" self._set_response(response) self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED) self.assertEqual(len(self.get_running_procs()), 1) for i in range(0, 5): status = self.haa_client.status().result try: self.assertEqual(status, 'STEADY') break except: gevent.sleep(1) else: assert False, "HA Service took too long to get to state STEADY"
def _initialize(self):
    """
    Start port agent, add port agent cleanup.
    Start container.
    Start deploy services.
    Define agent config, start agent.
    Start agent client.

    @retval True on success, False if any step raised (the exception is
            logged, not propagated).
    """
    try:
        """
        Get the information for the driver. This can be read from the yml
        files; the user can run switch_driver to change the current driver.
        """
        self.fetch_metadata()
        self.fetch_driver_class()
        self.fetch_comm_config()

        # Named pipe used for communication with the port agent.
        if not exists(PIPE_PATH):
            mkfifo(PIPE_PATH)

        if not exists(self.metadata.driver_dir()):
            raise DriverDoesNotExist(
                "%s/%s/$%s" % (self.metadata.driver_make,
                               self.metadata.driver_model,
                               self.driver_name))

        # Fully-qualified driver module path from the metadata pieces.
        driver_module = DRIVER_MODULE_ROOT + self.metadata.driver_make + \
            '.' + self.metadata.driver_model + '.' + self.driver_name + \
            DRIVER_MODULE_LEAF
        log.info('driver module: %s', driver_module)
        log.info('driver class: %s', self.driver_class)
        log.info('device address: %s', self.ip_address)
        log.info('device data port: %s', self.data_port)
        log.info('device command port: %s', self.command_port)
        log.info('log delimiter: %s', DELIM)
        log.info('work dir: %s', WORK_DIR)

        DVR_CONFIG.update({
            'dvr_mod': driver_module,
            'dvr_cls': self.driver_class
        })

        """
        self._support = DriverIntegrationTestSupport(driver_module,
            self.driver_class, self.ip_address, self.data_port, DELIM, WORK_DIR)
        """

        # Start port agent, add stop to cleanup (not sure if that's
        # necessary yet).
        print(
            "------------------>>>> Starting Port Agent <<<<------------------"
        )
        self.start_pagent()

        # Start a monitor window if specified.
        if self.monitor_window:
            self.monitor_file = self._pagent.port_agent.logfname
            strXterm = "xterm -T InstrumentMonitor -sb -rightbar"
            #pOpenString = "xterm -T InstrumentMonitor -e tail -f " + self.monitor_file
            pOpenString = strXterm + " -e tail -f " + self.monitor_file
            x = subprocess.Popen(pOpenString, shell=True)

        """
        DHE: Added self._cleanups to make base classes happy
        """
        self.addCleanup(self.stop_pagent)

        # Start container.
        print(
            "------------------>>>> Starting Capability Container <<<<------------------"
        )
        self._start_container()

        # Bring up services in a deploy file (no need to message)
        print(
            "------------------>>>> Starting Deploy Services <<<<------------------"
        )
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Setup stream config.
        self._build_stream_config()

        # Create agent config.
        agent_config = {
            'driver_config': DVR_CONFIG,
            'stream_config': self.stream_config,
            'agent': {
                'resource_id': IA_RESOURCE_ID
            },
            'test_mode': True
        }

        # Start instrument agent.
        self._ia_pid = None
        print(
            "------------------>>>> Starting Instrument Agent <<<<------------------"
        )
        container_client = ContainerAgentClient(node=self.container.node,
                                                name=self.container.name)
        self._ia_pid = container_client.spawn_process(name=IA_NAME,
                                                      module=IA_MOD,
                                                      cls=IA_CLS,
                                                      config=agent_config)
        log.info('Agent pid=%s.', str(self._ia_pid))

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = None
        self._ia_client = ResourceAgentClient(IA_RESOURCE_ID,
                                              process=FakeProcess())
        log.info('Got ia client %s.', str(self._ia_client))

        if self.subcriber_window:
            self._start_data_subscribers(6)
            #self.addCleanup(self._stop_data_subscribers)
    except:
        # NOTE(review): deliberately broad best-effort catch — any startup
        # failure is logged and reported via the False return value.
        log.error("initialize(): Exception occurred; shutting down.",
                  exc_info=True)
        return False
    else:
        return True
class ExecutionEngineAgentPyonIntTest(IonIntegrationTestCase):
    """Integration tests for the EE agent with the 'pyon' launch type.

    Spawns an eeagent in the test container and drives it through the
    ExecutionEngineAgentClient (launch/terminate/cleanup/dump_state).
    """

    # Class-level default so tearDown can safely check it even when a
    # test never started a webserver.
    _webserver = None

    @needs_eeagent
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')

        self.resource_id = "eeagent_123456789"
        self._eea_name = "eeagent"

        self.persistence_directory = tempfile.mkdtemp()

        self.agent_config = {
            'eeagent': {
                'heartbeat': 1,
                'slots': 100,
                'name': 'pyon_eeagent',
                'launch_type': {
                    'name': 'pyon',
                    'persistence_directory': self.persistence_directory,
                }
            },
            'agent': {'resource_id': self.resource_id},
            'logging': {
                'loggers': {
                    'eeagent': {
                        'level': 'DEBUG',
                        'handlers': ['console']
                    }
                },
                'root': {
                    'handlers': ['console']
                },
            }
        }

        self._start_eeagent()

    def _start_eeagent(self):
        """Spawn the eeagent process and build clients to talk to it."""
        self.container_client = ContainerAgentClient(
            node=self.container.node, name=self.container.name)
        self.container = self.container_client._get_container_instance()

        # Start eeagent.
        self._eea_pid = self.container_client.spawn_process(
            name=self._eea_name,
            module="ion.agents.cei.execution_engine_agent",
            cls="ExecutionEngineAgent",
            config=self.agent_config)
        log.info('Agent pid=%s.', str(self._eea_pid))

        # Start a resource agent client to talk with the instrument agent.
        self._eea_pyon_client = SimpleResourceAgentClient(
            self.resource_id, process=FakeProcess())
        log.info('Got eea client %s.', str(self._eea_pyon_client))

        self.eea_client = ExecutionEngineAgentClient(self._eea_pyon_client)

    def tearDown(self):
        self._stop_webserver()
        self.container.terminate_process(self._eea_pid)
        shutil.rmtree(self.persistence_directory)

    def _start_webserver(self, directory_to_serve, port=None):
        """ Start a webserver for testing code download
        Note: tries really hard to get a port, and if it can't use
        the suggested port, randomly picks another, and returns it
        """
        def log_message(self, format, *args):
            # swallow log messages
            pass

        class Server(HTTPServer):
            requests = 0

            def serve_forever(self):
                self._serving = 1
                while self._serving:
                    self.handle_request()
                    self.requests += 1

            def stop(self):
                self._serving = 0

        if port is None:
            port = 8008
        Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        Handler.log_message = log_message

        for i in range(0, 100):
            try:
                self._webserver = Server(("localhost", port), Handler)
            except socket.error:
                print("port %s is in use, picking another" % port)
                port = randint(8000, 10000)
                continue
            else:
                break

        self._web_glet = gevent.spawn(self._webserver.serve_forever)
        return port

    def _stop_webserver(self):
        if self._webserver is not None:
            self._web_glet.kill()

    def _enable_code_download(self, whitelist=None):
        """Restart the eeagent with code download enabled for *whitelist*."""
        if whitelist is None:
            whitelist = []

        self.container.terminate_process(self._eea_pid)
        self.agent_config['eeagent']['code_download'] = {
            'enabled': True,
            'whitelist': whitelist
        }
        self._start_eeagent()

    def wait_for_state(self, upid, desired_state, timeout=30):
        """Poll the eeagent until *upid* reaches *desired_state*.

        Fails the test (assert False) after roughly *timeout* attempts.
        """
        attempts = 0
        last_state = None
        while timeout > attempts:
            try:
                state = self.eea_client.dump_state().result
            except Timeout:
                log.warn("Timeout calling EEAgent dump_state. retrying.")
                # BUG FIX: the retry previously did not increment
                # 'attempts', so a persistently timing-out agent made
                # this loop spin forever instead of failing the test.
                attempts += 1
                continue

            proc = get_proc_for_upid(state, upid)
            last_state = proc.get('state')
            if last_state == desired_state:
                return
            gevent.sleep(1)
            attempts += 1

        assert False, "Process %s took too long to get to %s, had %s" % (
            upid, desired_state, last_state)

    @needs_eeagent
    def test_basics(self):
        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        self.eea_client.terminate_process(u_pid, round)
        self.wait_for_state(u_pid, [700, 'TERMINATED'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        self.eea_client.cleanup_process(u_pid, round)
        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 0

    @needs_eeagent
    def test_duplicate(self):
        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        # Launching the same upid/round again must be idempotent: still
        # exactly one process tracked.
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        self.eea_client.terminate_process(u_pid, round)
        self.wait_for_state(u_pid, [700, 'TERMINATED'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        self.eea_client.cleanup_process(u_pid, round)
        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 0

    @needs_eeagent
    def test_restart(self):
        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        # Start again with incremented round. eeagent should restart the process
        round += 1
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        state = self.eea_client.dump_state().result
        ee_round = state['processes'][0]['round']
        assert round == int(ee_round)

        # TODO: this test is disabled, as the restart op is disabled
        # Run restart with incremented round. eeagent should restart the process
        #round += 1
        #self.eea_client.restart_process(u_pid, round)
        #self.wait_for_state(u_pid, [500, 'RUNNING'])
        #state = self.eea_client.dump_state().result
        #ee_round = state['processes'][0]['round']
        #assert round == int(ee_round)

        self.eea_client.terminate_process(u_pid, round)
        self.wait_for_state(u_pid, [700, 'TERMINATED'])

        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 1

        self.eea_client.cleanup_process(u_pid, round)
        state = self.eea_client.dump_state().result
        assert len(state['processes']) == 0

    @needs_eeagent
    def test_failing_process(self):
        u_pid = "testfail"
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcessFail'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [850, 'FAILED'])

        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

    @needs_eeagent
    def test_slow_to_start(self):
        upids = map(lambda i: str(uuid.uuid4().hex), range(0, 10))
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcessSlowStart'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        for upid in upids:
            self.eea_client.launch_process(upid, round, run_type, parameters)

        for upid in upids:
            self.wait_for_state(upid, [500, 'RUNNING'], timeout=60)

    @needs_eeagent
    def test_start_cancel(self):
        upid = str(uuid.uuid4().hex)
        round = 0
        run_type = "pyon"
        proc_name = 'test_x'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcessSlowStart'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(upid, round, run_type, parameters)
        self.wait_for_state(upid, [400, 'PENDING'])

        self.eea_client.terminate_process(upid, round)
        self.wait_for_state(upid, [700, 'TERMINATED'])

    @needs_eeagent
    def test_kill_and_revive(self):
        """test_kill_and_revive
        Ensure that when an eeagent dies, it pulls the processes it owned
        from persistence, and marks them as failed, so the PD can figure
        out what to do with them
        """
        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transform'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        # Kill and restart eeagent. Also, kill proc started by eea to
        # simulate a killed container.
        old_eea_pid = str(self._eea_pid)
        self.container.terminate_process(self._eea_pid)
        proc_to_kill = self.container.proc_manager.procs_by_name.get(proc_name)
        self.assertIsNotNone(proc_to_kill)
        self.container.terminate_process(proc_to_kill.id)

        self._start_eeagent()

        self.assertNotEqual(old_eea_pid, self._eea_pid)

        self.wait_for_state(u_pid, [850, 'FAILED'])

    @needs_eeagent
    def test_run_out_of_slots(self):
        """test_run_out_of_slots
        """
        # Restart the agent with a single slot.
        old_eea_pid = str(self._eea_pid)
        self.container.terminate_process(self._eea_pid)
        self.agent_config['eeagent']['slots'] = 1
        self._start_eeagent()
        self.assertNotEqual(old_eea_pid, self._eea_pid)

        u_pid_0, u_pid_1 = "test0", "test1"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transform'
        module = 'ion.agents.cei.test.test_eeagent'
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module, 'cls': cls}

        # First process takes the only slot; the second must be rejected.
        self.eea_client.launch_process(u_pid_0, round, run_type, parameters)
        self.wait_for_state(u_pid_0, [500, 'RUNNING'])

        self.eea_client.launch_process(u_pid_1, round, run_type, parameters)
        self.wait_for_state(u_pid_1, [900, 'REJECTED'])

        # Restart again: the running process is marked FAILED, the
        # rejected one stays REJECTED.
        old_eea_pid = str(self._eea_pid)
        self.container.terminate_process(self._eea_pid)
        self.agent_config['eeagent']['slots'] = 1
        self._start_eeagent()
        self.assertNotEqual(old_eea_pid, self._eea_pid)

        self.wait_for_state(u_pid_0, [850, 'FAILED'])
        self.wait_for_state(u_pid_1, [900, 'REJECTED'])

    @needs_eeagent
    def test_download_code(self):
        self._enable_code_download(whitelist=['*'])

        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transform'
        module = "ion.my.module.to.download"
        module_uri = 'file://%s/downloads/module_to_download.py' % get_this_directory()
        bad_module_uri = 'file:///tmp/notreal/module_to_download.py'
        cls = 'TestDownloadProcess'

        parameters = {'name': proc_name, 'module': module,
                      'module_uri': bad_module_uri, 'cls': cls}
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        print(response)
        assert response.status == 404
        assert "Unable to download" in response.result

        parameters = {'name': proc_name, 'module': module,
                      'module_uri': module_uri, 'cls': cls}
        round += 1
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])

        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

    @needs_eeagent
    def test_whitelist(self):
        downloads_directory = os.path.join(get_this_directory(), "downloads")
        http_port = 8910
        http_port = self._start_webserver(downloads_directory, port=http_port)

        while self._webserver is None:
            print("Waiting for webserver to come up")
            gevent.sleep(1)

        assert self._webserver.requests == 0

        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transform'
        module = "ion.my.module"
        module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
        cls = 'TestDownloadProcess'
        parameters = {'name': proc_name, 'module': module,
                      'module_uri': module_uri, 'cls': cls}

        # Download disabled entirely.
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        assert response.status == 401
        assert "Code download not enabled" in response.result

        # Test no whitelist
        self._enable_code_download()

        round += 1
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        print(response)
        assert response.status == 401
        assert "not in code_download whitelist" in response.result

        # Test not matching
        self._enable_code_download(whitelist=['blork'])

        round += 1
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        assert response.status == 401
        assert "not in code_download whitelist" in response.result

        # Test exact matching
        self._enable_code_download(whitelist=['localhost'])

        round += 1
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

        # Test wildcard
        self._enable_code_download(whitelist=['*'])

        round += 1
        response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

    @needs_eeagent
    def test_caching(self):
        downloads_directory = os.path.join(get_this_directory(), "downloads")
        http_port = 8910
        http_port = self._start_webserver(downloads_directory, port=http_port)

        while self._webserver is None:
            print("Waiting for webserver to come up")
            gevent.sleep(1)

        self._enable_code_download(['*'])

        assert self._webserver.requests == 0

        u_pid = "test0"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transform'
        module = "ion.my.module"
        module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
        cls = 'TestDownloadProcess'
        parameters = {'name': proc_name, 'module': module,
                      'module_uri': module_uri, 'cls': cls}

        # Launch a process, check that webserver is hit
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

        assert self._webserver.requests == 1

        # Launch another process, check that webserver is still only hit once
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

        assert self._webserver.requests == 1

        u_pid = "test5"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transformx'
        module = "ion.agents.cei.test.test_eeagent"
        module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
        cls = 'TestProcess'
        parameters = {'name': proc_name, 'module': module,
                      'module_uri': module_uri, 'cls': cls}

        # Test that a module that is already available in tarball won't
        # trigger a download
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [500, 'RUNNING'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)

        assert self._webserver.requests == 1

        u_pid = "test9"
        round = 0
        run_type = "pyon"
        proc_name = 'test_transformx'
        module = "ion.agents.cei.test.test_eeagent"
        module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
        cls = 'TestProcessNotReal'
        parameters = {'name': proc_name, 'module': module,
                      'module_uri': module_uri, 'cls': cls}

        # Test behaviour of a non existant class with no download
        self.eea_client.launch_process(u_pid, round, run_type, parameters)
        self.wait_for_state(u_pid, [850, 'FAILED'])
        self.eea_client.terminate_process(u_pid, round)
        state = self.eea_client.dump_state().result
        get_proc_for_upid(state, u_pid)
def setUp(self):
    """
    Start fake terrestrial components and add cleanup.
    Start terrestrial server and retrieve port.
    Set internal variables.
    Start container.
    Start deployment.
    Start container agent.
    Spawn remote endpoint process.
    Create remote endpoint client and retrieve remote server port.
    Create event publisher.
    """
    # Fake terrestrial-side R3PC endpoints; stopped automatically on
    # test teardown.
    self._terrestrial_server = R3PCServer(self.consume_req,
                                          self.terrestrial_server_close)
    self._terrestrial_client = R3PCClient(self.consume_ack,
                                          self.terrestrial_client_close)
    self.addCleanup(self._terrestrial_server.stop)
    self.addCleanup(self._terrestrial_client.stop)
    # Bind to an ephemeral port (0) and remember what was assigned.
    self._other_port = self._terrestrial_server.start('*', 0)
    log.debug('Terrestrial server binding to *:%i', self._other_port)

    self._other_host = 'localhost'
    self._platform_resource_id = 'abc123'
    self._resource_id = 'fake_id'
    self._no_requests = 10
    self._requests_sent = {}
    self._results_recv = {}
    self._no_telem_events = 0
    self._done_evt = AsyncResult()
    self._done_telem_evts = AsyncResult()
    self._cmd_tx_evt = AsyncResult()

    # Start container.
    # BUG FIX: log message typo 'Staring' corrected to 'Starting'.
    log.debug('Starting capability container.')
    self._start_container()

    # Bring up services in a deploy file (no need to message).
    log.info('Starting deploy services.')
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    # Create a container client.
    log.debug('Creating container client.')
    container_client = ContainerAgentClient(node=self.container.node,
                                            name=self.container.name)

    # Create agent config.
    endpoint_config = {
        'other_host': self._other_host,
        'other_port': self._other_port,
        'this_port': 0,
        'platform_resource_id': self._platform_resource_id
    }

    # Spawn the remote endpoint process.
    log.debug('Spawning remote endpoint process.')
    re_pid = container_client.spawn_process(
        name='remote_endpoint_1',
        module='ion.services.sa.tcaa.remote_endpoint',
        cls='RemoteEndpoint',
        config=endpoint_config)
    log.debug('Endpoint pid=%s.', str(re_pid))

    # Create an endpoint client.
    self.re_client = RemoteEndpointClient(
        process=FakeProcess(),
        to_name=re_pid)
    log.debug('Got re client %s.', str(self.re_client))

    # Remember the remote port.
    self._this_port = self.re_client.get_port()
    log.debug('The remote port is: %i.', self._this_port)

    # Start the event publisher.
    self._event_publisher = EventPublisher()
def setUp(self):
    """
    Set up driver integration support.
    Start port agent, add port agent cleanup.
    Start container.
    Start deploy services.
    Define agent config, start agent.
    Start agent client.
    """
    print('#####################')
    print('IN SETUP')

    self._ia_client = None

    # Start container.
    # BUG FIX: log message typo 'Staring' corrected to 'Starting'.
    log.info('Starting capability container.')
    self._start_container()

    # Bring up services in a deploy file (no need to message).
    log.info('Starting deploy services.')
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    # Setup stream config.
    log.info('building stream configuration')
    self._build_stream_config()

    # Create agent config.
    agent_config = {
        'driver_config': DVR_CONFIG,
        'stream_config': self._stream_config,
        'agent': {'resource_id': IA_RESOURCE_ID},
        'test_mode': True,
        'forget_past': True,
        'enable_persistence': False
    }

    #if org_governance_name is not None:
    #    agent_config['org_governance_name'] = org_governance_name

    # Start instrument agent.
    log.info("TestInstrumentAgent.setup(): starting IA.")
    container_client = ContainerAgentClient(node=self.container.node,
                                            name=self.container.name)

    log.info("Agent setup")
    ia_pid = container_client.spawn_process(name=IA_NAME,
                                            module=IA_MOD,
                                            cls=IA_CLS,
                                            config=agent_config)
    log.info('Agent pid=%s.', str(ia_pid))
    self.addCleanup(self._verify_agent_reset)

    # Start a resource agent client to talk with the instrument agent.
    self._ia_client = ResourceAgentClient(IA_RESOURCE_ID,
                                          process=FakeProcess())
    log.info('Got ia client %s.', str(self._ia_client))

    log.info('test setup complete')
def instrument_test_driver(container):
    # Stand up an SBE37 instrument agent inside *container* for manual
    # testing: resolves the system actor, builds the driver config,
    # creates one stream + subscription per entry in PACKET_CONFIG, then
    # spawns the agent process with that configuration.
    org_client = OrgManagementServiceClient(node=container.node)
    id_client = IdentityManagementServiceClient(node=container.node)

    system_actor = id_client.find_actor_identity_by_name(name=CFG.system.system_actor)
    log.info('system actor:' + system_actor._id)

    # Role headers are attached to stream creation below so the calls
    # carry the system actor's governance roles.
    sa_header_roles = get_role_message_headers(org_client.find_all_roles_by_user(system_actor._id))

    # Names of agent data streams to be configured.
    parsed_stream_name = 'ctd_parsed'
    raw_stream_name = 'ctd_raw'

    # Driver configuration.
    # Simulator
    driver_config = {
        'svr_addr': 'localhost',
        'cmd_port': 5556,
        'evt_port': 5557,
        'dvr_mod': 'ion.agents.instrument.drivers.sbe37.sbe37_driver',
        'dvr_cls': 'SBE37Driver',
        'comms_config': {
            SBE37Channel.CTD: {
                'method': 'ethernet',
                # Device address/port come from deployment config (CFG).
                'device_addr': CFG.device.sbe37.host,
                'device_port': CFG.device.sbe37.port,
                'server_addr': 'localhost',
                'server_port': 8888
            }
        }
    }

    # Hardware

    _container_client = ContainerAgentClient(node=container.node,
                                             name=container.name)

    # Create a pubsub client to create streams.
    _pubsub_client = PubsubManagementServiceClient(node=container.node)

    # A callback for processing subscribed-to data.
    def consume(message, headers):
        log.info('Subscriber received message: %s', str(message))

    # Create a stream subscriber registrar to create subscribers.
    subscriber_registrar = StreamSubscriberRegistrar(process=container,
                                                     node=container.node)

    subs = []

    # Create streams for each stream named in driver.
    stream_config = {}
    for (stream_name, val) in PACKET_CONFIG.iteritems():
        stream_def = ctd_stream_definition(stream_id=None)
        stream_def_id = _pubsub_client.create_stream_definition(
            container=stream_def)
        stream_id = _pubsub_client.create_stream(
            name=stream_name,
            stream_definition_id=stream_def_id,
            original=True,
            encoding='ION R2',
            headers={'ion-actor-id': system_actor._id,
                     'ion-actor-roles': sa_header_roles})
        stream_config[stream_name] = stream_id

        # Create subscriptions for each stream.
        exchange_name = '%s_queue' % stream_name
        sub = subscriber_registrar.create_subscriber(exchange_name=exchange_name,
                                                     callback=consume)
        sub.start()
        query = StreamQuery(stream_ids=[stream_id])
        sub_id = _pubsub_client.create_subscription(
            query=query, exchange_name=exchange_name)
        _pubsub_client.activate_subscription(sub_id)
        subs.append(sub)

    # Create agent config.
    agent_resource_id = '123xyz'
    agent_config = {
        'driver_config': driver_config,
        'stream_config': stream_config,
        'agent': {'resource_id': agent_resource_id}
    }

    # Launch an instrument agent process.
    _ia_name = 'agent007'
    _ia_mod = 'ion.agents.instrument.instrument_agent'
    _ia_class = 'InstrumentAgent'
    _ia_pid = _container_client.spawn_process(name=_ia_name,
                                              module=_ia_mod,
                                              cls=_ia_class,
                                              config=agent_config)

    log.info('got pid=%s for resource_id=%s' % (str(_ia_pid), str(agent_resource_id)))
class HighAvailabilityAgentTest(IonIntegrationTestCase):
    """Integration tests for the HA agent's npreserving policy, its
    Service registry associations, and its dashi interface."""

    @needs_epu
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2cei.yml')
        #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
        self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

        self.process_definition_id = uuid4().hex
        self.process_definition_name = 'test'
        self.process_definition = ProcessDefinition(
            name=self.process_definition_name,
            executable={'module': 'ion.agents.cei.test.test_haagent',
                        'class': 'TestProcess'})
        self.pd_cli.create_process_definition(self.process_definition,
                                              self.process_definition_id)

        self.resource_id = "haagent_1234"
        self._haa_name = "high_availability_agent"
        self._haa_dashi_name = "dashi_haa_" + uuid4().hex
        self._haa_dashi_uri = get_dashi_uri_from_cfg()
        self._haa_dashi_exchange = "%s.hatests" % bootstrap.get_sys_name()
        # Start with preserve_n=0; tests reconfigure the policy as needed.
        self._haa_config = {
            'highavailability': {
                'policy': {
                    'interval': 1,
                    'name': 'npreserving',
                    'parameters': {
                        'preserve_n': 0
                    }
                },
                'process_definition_id': self.process_definition_id,
                'dashi_messaging': True,
                'dashi_exchange': self._haa_dashi_exchange,
                'dashi_name': self._haa_dashi_name
            },
            'agent': {'resource_id': self.resource_id},
        }

        # Snapshot pre-existing services/processes so tests can compare
        # against a clean baseline.
        self._base_services, _ = self.container.resource_registry.find_resources(
            restype="Service", name=self.process_definition_name)

        self._base_procs = self.pd_cli.list_processes()

        self.waiter = ProcessStateWaiter()
        self.waiter.start()

        self.container_client = ContainerAgentClient(node=self.container.node,
                                                     name=self.container.name)
        self._haa_pid = self.container_client.spawn_process(
            name=self._haa_name,
            module="ion.agents.cei.high_availability_agent",
            cls="HighAvailabilityAgent",
            config=self._haa_config)

        # Start a resource agent client to talk with the instrument agent.
        self._haa_pyon_client = SimpleResourceAgentClient(
            self.resource_id, process=FakeProcess())
        log.info('Got haa client %s.', str(self._haa_pyon_client))

        self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)

    def tearDown(self):
        self.waiter.stop()
        try:
            self.container.terminate_process(self._haa_pid)
        except BadRequest:
            log.warning("Couldn't terminate HA Agent in teardown (May have been terminated by a test)")
        self._stop_container()

    def get_running_procs(self):
        """returns a normalized set of running procs (removes the ones
        that were there at setup time)
        """
        base = self._base_procs
        base_pids = [proc.process_id for proc in base]
        current = self.pd_cli.list_processes()
        current_pids = [proc.process_id for proc in current]
        print("filtering base procs %s from %s" % (base_pids, current_pids))
        normal = [cproc for cproc in current
                  if cproc.process_id not in base_pids
                  and cproc.process_state == ProcessStateEnum.RUNNING]
        return normal

    def get_new_services(self):
        """Return Service resources registered since setUp ran."""
        base = self._base_services
        base_names = [i.name for i in base]
        services_registered, _ = self.container.resource_registry.find_resources(
            restype="Service", name=self.process_definition_name)
        # NOTE: an unused 'current_names' list comprehension was removed
        # here; it had no effect on the result.
        normal = [cserv for cserv in services_registered
                  if cserv.name not in base_names]
        return normal

    def await_ha_state(self, want_state, timeout=10):
        """Poll the HA agent until it reports *want_state* or raise."""
        for i in range(0, timeout):
            status = self.haa_client.status().result
            if status == want_state:
                return
            gevent.sleep(1)
        raise Exception("Took more than %s to get to ha state %s" %
                        (timeout, want_state))

    def test_features(self):
        status = self.haa_client.status().result
        # Ensure HA hasn't already failed
        assert status in ('PENDING', 'READY', 'STEADY')

        # verifies L4-CI-CEI-RQ44
        # Note: the HA agent is started in the setUp() method, with config
        # pointing to the test "service". The initial config is set to
        # preserve 0 service processes. With this reconfigure step below,
        # we change that to launch 1.
        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)

        result = self.haa_client.dump().result
        self.assertEqual(result['policy'], new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)
        self.assertEqual(len(self.get_running_procs()), 1)

        for i in range(0, 5):
            status = self.haa_client.status().result
            try:
                self.assertEqual(status, 'STEADY')
                break
            # BUG FIX: was a bare 'except:'; the try body only asserts,
            # so catch AssertionError and let real errors propagate.
            except AssertionError:
                gevent.sleep(1)
        else:
            assert False, "HA Service took too long to get to state STEADY"

        # verifies L4-CI-CEI-RQ122 and L4-CI-CEI-RQ124
        new_policy = {'preserve_n': 2}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)
        self.assertEqual(len(self.get_running_procs()), 2)

        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 1)

        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)

        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 0)

    def test_associations(self):
        # Ensure that once the HA Agent starts, there is a Service object
        # in the registry
        result = self.haa_client.dump().result
        service_id = result.get('service_id')
        self.assertIsNotNone(service_id)
        service = self.container.resource_registry.read(service_id)
        self.assertIsNotNone(service)

        # Ensure that once a process is started, there is an association
        # between it and the service
        new_policy = {'preserve_n': 1}
        self.haa_client.reconfigure_policy(new_policy)
        self.waiter.await_state_event(state=ProcessStateEnum.RUNNING)
        self.assertEqual(len(self.get_running_procs()), 1)

        self.await_ha_state('STEADY')

        proc = self.get_running_procs()[0]

        processes_associated, _ = self.container.resource_registry.find_resources(
            restype="Process", name=proc.process_id)
        self.assertEqual(len(processes_associated), 1)

        has_processes = self.container.resource_registry.find_associations(
            service, "hasProcess")
        self.assertEqual(len(has_processes), 1)

        self.await_ha_state('STEADY')

        # Ensure that once we terminate that process, there are no
        # associations
        new_policy = {'preserve_n': 0}
        self.haa_client.reconfigure_policy(new_policy)
        self.waiter.await_state_event(state=ProcessStateEnum.TERMINATED)
        self.assertEqual(len(self.get_running_procs()), 0)

        processes_associated, _ = self.container.resource_registry.find_resources(
            restype="Process", name=proc.process_id)
        self.assertEqual(len(processes_associated), 0)

        has_processes = self.container.resource_registry.find_associations(
            service, "hasProcess")
        self.assertEqual(len(has_processes), 0)

        # Ensure that once we terminate that HA Agent, the Service object
        # is cleaned up
        self.container.terminate_process(self._haa_pid)

        with self.assertRaises(NotFound):
            service = self.container.resource_registry.read(service_id)

    def test_dashi(self):
        import dashi

        dashi_conn = dashi.DashiConnection("something",
                                           self._haa_dashi_uri,
                                           self._haa_dashi_exchange)

        status = dashi_conn.call(self._haa_dashi_name, "status")
        assert status in ('PENDING', 'READY', 'STEADY')

        new_policy = {'preserve_n': 0}
        dashi_conn.call(self._haa_dashi_name, "reconfigure_policy",
                        new_policy=new_policy)