def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, sub_type=None,
             origin_type=None, pattern=None):
    self.event_type = event_type
    self.sub_type = sub_type
    self.origin_type = origin_type
    self.origin = origin

    xp_name = xp_name or get_events_exchange_point()
    if pattern:
        binding = pattern
    else:
        binding = self._topic(event_type, origin, sub_type, origin_type)
    self.binding = binding

    # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

    # prefix the queue_name, if specified, with the sysname
    # this is because queue names transcend xp boundaries (see R1 OOIION-477)
    if queue_name is not None:
        if not queue_name.startswith(bootstrap.get_sys_name()):
            queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
            log.warn("queue_name specified, prepending sys_name to it: %s", queue_name)

    # set this name to be picked up by inherited folks
    self._ev_recv_name = (xp_name, queue_name)
def _force_clean(cls, recreate=False, initial=False):
    # Database resources
    from pyon.core.bootstrap import get_sys_name, CFG
    from pyon.datastore.datastore_common import DatastoreFactory
    datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE,
                                               scope=get_sys_name())
    if initial:
        datastore._init_database(datastore.database)

    dbs = datastore.list_datastores()
    clean_prefix = '%s_' % get_sys_name().lower()
    things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]
    try:
        for thing in things_to_clean:
            datastore.delete_datastore(datastore_name=thing)
            if recreate:
                datastore.create_datastore(datastore_name=thing)
    finally:
        datastore.close()

    # Broker resources
    from putil.rabbitmq.rabbit_util import RabbitManagementUtil
    rabbit_util = RabbitManagementUtil(CFG, sysname=bootstrap.get_sys_name())
    deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
    log.info("Deleted %s exchanges, %s queues" % (len(deleted_exchanges), len(deleted_queues)))

    # File system
    from pyon.util.file_sys import FileSystem
    FileSystem._clean(CFG)
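# A standalone illustration (values hypothetical) of the sysname-prefix filter used
# in _force_clean above: only datastores whose names start with "<sysname>_" are cleaned.
dbs = ["ion_alice_resources", "ion_alice_events", "unrelated_db"]
clean_prefix = "ion_alice_"   # stand-in for '%s_' % get_sys_name().lower()
assert [x for x in dbs if x.startswith(clean_prefix)] == ["ion_alice_resources", "ion_alice_events"]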
def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None, sub_type=None,
             origin_type=None, pattern=None):
    self.event_type = event_type
    self.sub_type = sub_type
    self.origin_type = origin_type
    self.origin = origin

    xp_name = xp_name or get_events_exchange_point()
    if pattern:
        binding = pattern
    else:
        binding = self._topic(event_type, origin, sub_type, origin_type)
    self.binding = binding

    # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

    # prefix the queue_name, if specified, with the sysname
    if queue_name is not None:
        if not queue_name.startswith(bootstrap.get_sys_name()):
            queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
    else:
        queue_name = create_simple_unique_id()
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)
        queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)

    # set this name to be picked up by inherited folks
    self._ev_recv_name = (xp_name, queue_name)
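# A minimal sketch (not pyon API) of the queue-name scoping rule both __init__
# variants above implement: queue names transcend exchange-point boundaries, so
# they are prefixed with the sysname unless already scoped.
def scope_queue_name(queue_name, sys_name):
    """Hypothetical helper mirroring the prefixing logic above."""
    if queue_name.startswith(sys_name):
        return queue_name
    return "%s.%s" % (sys_name, queue_name)

assert scope_queue_name("my_queue", "ion_test") == "ion_test.my_queue"
assert scope_queue_name("ion_test.my_queue", "ion_test") == "ion_test.my_queue"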
def es_cleanup():
    es_host = CFG.get_safe("server.elasticsearch.host", "localhost")
    es_port = CFG.get_safe("server.elasticsearch.port", "9200")

    es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)

    indexes = STD_INDEXES.keys()
    indexes.append("%s_resources_index" % get_sys_name().lower())
    indexes.append("%s_events_index" % get_sys_name().lower())

    for index in indexes:
        IndexManagementService._es_call(es.river_couchdb_delete, index)
        IndexManagementService._es_call(es.index_delete, index)
def _cleanup_method(self, queue_name, ep=None):
    """
    Common method to be passed to each spawned ION process to clean up their process-queue.

    @TODO Leaks implementation detail, should be using XOs
    """
    if not ep._chan._queue_auto_delete:
        # only need to delete if AMQP didn't handle it for us already!
        # @TODO this will not work with XOs (future)
        ch = self.container.node.channel(RecvChannel)
        ch._recv_name = NameTrio(get_sys_name(), "%s.%s" % (get_sys_name(), queue_name))
        ch._destroy_queue()
def on_start(self):
    TransformDataProcess.on_start(self)

    # set up subscriber to *
    self._bt_sub = Subscriber(callback=lambda m, h: self.call_process(m),
                              from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))

    # spawn listener
    self._sub_gl = spawn(self._bt_sub.listen)

    # set up publisher to anything!
    self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
def _cleanup_method(self, queue_name, ep=None):
    """
    Common method to be passed to each spawned ION process to clean up their process-queue.

    @TODO Leaks implementation detail, should be using XOs
    """
    if ep._chan is not None and not ep._chan._queue_auto_delete:
        # only need to delete if AMQP didn't handle it for us already!
        # @TODO this will not work with XOs (future)
        try:
            ch = self.container.node.channel(RecvChannel)
            ch._recv_name = NameTrio(get_sys_name(), "%s.%s" % (get_sys_name(), queue_name))
            ch._destroy_queue()
        except TransportError as ex:
            log.warn("Cleanup method triggered an error, ignoring: %s", ex)
def setUp(self):
    super(DataRetrieverIntTestAlpha, self).setUp()
    self._start_container()

    config = DotDict()
    config.bootstrap.processes.ingestion.module = 'ion.processes.data.ingestion.ingestion_worker_a'
    config.bootstrap.processes.replay.module = 'ion.processes.data.replay.replay_process_a'
    self.container.start_rel_from_url('res/deploy/r2dm.yml', config)

    self.datastore_name = 'test_datasets'
    self.datastore = self.container.datastore_manager.get_datastore(self.datastore_name,
                                                                    profile=DataStore.DS_PROFILE.SCIDATA)

    self.data_retriever = DataRetrieverServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def setUp(self):
    super(DataRetrieverServiceIntTest, self).setUp()
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    self.couch = self.container.datastore_manager.get_datastore('test_data_retriever',
                                                                profile=DataStore.DS_PROFILE.SCIDATA)
    self.datastore_name = 'test_data_retriever'

    self.dr_cli = DataRetrieverServiceClient(node=self.container.node)
    self.dsm_cli = DatasetManagementServiceClient(node=self.container.node)
    self.rr_cli = ResourceRegistryServiceClient(node=self.container.node)
    self.ps_cli = PubsubManagementServiceClient(node=self.container.node)
    self.tms_cli = TransformManagementServiceClient(node=self.container.node)
    self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)

    self.thread_pool = list()
def __init__(self, orgname=None, datastore_manager=None, events_enabled=False):
    # Get an instance of datastore configured as directory.
    datastore_manager = datastore_manager or bootstrap.container_instance.datastore_manager
    self.dir_store = datastore_manager.get_datastore(DataStore.DS_DIRECTORY)

    self.orgname = orgname or CFG.system.root_org
    self.is_root = (self.orgname == CFG.system.root_org)

    self.events_enabled = events_enabled
    self.event_pub = None
    self.event_sub = None

    # Create directory root entry (for current org) if not existing
    root_de = self.register("/", "DIR", sys_name=bootstrap.get_sys_name())
    if root_de is None:
        # We created this directory just now
        pass

    if self.events_enabled:
        # init change event publisher
        self.event_pub = EventPublisher()

        # Register to receive directory changes
        self.event_sub = EventSubscriber(event_type="ContainerConfigModifiedEvent",
                                         origin="Directory",
                                         callback=self.receive_directory_change_event)
def on_start(self):
    if not self.CFG.get_safe('system.elasticsearch', False):
        text = 'Can not initialize indexes without ElasticSearch enabled. Please enable system.elasticsearch.'
        log.error(text)
        raise BadRequest(text)

    self.sysname = get_sys_name().lower()

    self.es_host = self.CFG.get_safe('server.elasticsearch.host', 'localhost')
    self.es_port = self.CFG.get_safe('server.elasticsearch.port', '9200')

    self.index_shards = self.CFG.get_safe('server.elasticsearch.shards', 5)
    self.index_replicas = self.CFG.get_safe('server.elasticsearch.replicas', 1)

    self.river_shards = self.CFG.get_safe('server.elasticsearch.river_shards', 5)
    self.river_replicas = self.CFG.get_safe('server.elasticsearch.river_replicas', 1)

    self.es = ep.ElasticSearch(host=self.es_host, port=self.es_port, timeout=10)

    op = self.CFG.get('op', None)

    if op == 'index_bootstrap':
        self.index_bootstrap()
    elif op == 'clean_bootstrap':
        self.clean_bootstrap()
    else:
        raise BadRequest('Operation Unknown')
def launch_benchmark(transform_number=1, primer=1, message_length=4):
    import gevent
    from gevent.greenlet import Greenlet
    from pyon.util.containers import DotDict
    from pyon.net.transport import NameTrio
    from pyon.net.endpoint import Publisher
    import uuid

    num = transform_number
    msg_len = message_length
    transforms = list()
    pids = 1
    TransformBenchTesting.message_length = message_length
    cc = Container.instance
    pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
    for i in xrange(num):
        tbt = cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear',
                                                       'TransformInPlace',
                                                       DotDict({'process': {'name': 'tbt%d' % pids,
                                                                            'transform_id': pids}}))
        tbt.init()
        tbt.start()
        gevent.sleep(0.2)
        for i in xrange(primer):
            pub.publish(list(xrange(msg_len)))
        g = Greenlet(tbt.perf)
        g.start()
        transforms.append(tbt)
        pids += 1
def transaction(self, app_name=None, op=None, attrs=None, status_descr=None, status=None,
                req_bytes=None, resp_bytes=None, uS=None, initiator=None, target=None):
    """
    Record a transaction (typically a completed RPC).

    Called from Process level endpoint layer.
    """
    log.debug("SFlowManager.transaction")

    # build up the true app name
    full_app = ['ion', get_sys_name()]
    # don't duplicate the container (proc ids are typically containerid.number)
    if self._container.id in app_name:
        full_app.append(app_name)
    else:
        full_app.extend((self._container.id, app_name))
    full_app_name = ".".join(full_app)

    tsample = {'flow_sample': {
                   'app_name': full_app_name,
                   'sampling_rate': self._trans_sample_rate,
                   'app_operation': {
                       'operation': op,
                       'attributes': "&".join(["%s=%s" % (k, v) for k, v in attrs.iteritems()]),
                       'status_descr': status_descr,
                       'status': status,
                       'req_bytes': req_bytes,
                       'resp_bytes': resp_bytes,
                       'uS': uS},
                   'app_initiator': {
                       'actor': initiator},
                   'app_target': {
                       'actor': target}
                   }
               }

    self._publish(tsample)
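# Standalone rendering (names hypothetical) of the app-name assembly in
# transaction() above: the container id is only inserted when app_name does
# not already embed it (proc ids are typically "<containerid>.<number>").
def build_app_name(sys_name, container_id, app_name):
    full_app = ['ion', sys_name]
    if container_id in app_name:
        full_app.append(app_name)
    else:
        full_app.extend((container_id, app_name))
    return ".".join(full_app)

assert build_app_name("ion_sys", "cc1", "cc1.7") == "ion.ion_sys.cc1.7"
assert build_app_name("ion_sys", "cc1", "myproc") == "ion.ion_sys.cc1.myproc"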
def __init__(self, orgname=None, datastore_manager=None, events_enabled=False):
    # Get an instance of datastore configured as directory.
    datastore_manager = datastore_manager or bootstrap.container_instance.datastore_manager
    self.dir_store = datastore_manager.get_datastore(DataStore.DS_DIRECTORY)

    self.orgname = orgname or CFG.system.root_org
    self.is_root = (self.orgname == CFG.system.root_org)

    self.events_enabled = events_enabled
    self.event_pub = None
    self.event_sub = None

    # Create directory root entry (for current org) if not existing
    if CFG.system.auto_bootstrap:
        root_de = self.register("/", "DIR", sys_name=bootstrap.get_sys_name())
        if root_de is None:
            # We created this directory just now
            pass

    if self.events_enabled:
        # init change event publisher
        self.event_pub = EventPublisher()

        # Register to receive directory changes
        self.event_sub = EventSubscriber(event_type="ContainerConfigModifiedEvent",
                                         origin="Directory",
                                         callback=self.receive_directory_change_event)
def listen(lch):
    """
    The purpose of this listen method is to trigger waits in code below.

    By setting up a listener that subscribes to both 3 and 5, and putting received
    messages into the appropriate gevent-queues client side, we can assume that the
    channel we're actually testing with get_stats etc has had the message delivered too.
    """
    lch._queue_auto_delete = False
    lch.setup_listener(NameTrio(bootstrap.get_sys_name(), 'alternate_listener'), 'routed.3')
    lch._bind('routed.5')
    lch.start_consume()

    while True:
        try:
            newchan = lch.accept()
            m, h, d = newchan.recv()
            count = m.rsplit(',', 1)[-1]
            if m.startswith('5,'):
                self.five_events.put(int(count))
                newchan.ack(d)
            elif m.startswith('3,'):
                self.three_events.put(int(count))
                newchan.ack(d)
            else:
                raise StandardError("unknown message: %s" % m)
        except ChannelClosedError:
            break
def __init__(self, *args, **kwargs):
    BaseContainerAgent.__init__(self, *args, **kwargs)

    # Coordinates the container start
    self._status = INIT

    self._is_started = False

    # set container id and cc_agent name (as they are set in base class call)
    self.id = get_default_container_id()
    self.name = "cc_agent_%s" % self.id

    self.start_time = get_ion_ts()

    bootstrap.container_instance = self
    Container.instance = self

    self.container = self    # Make self appear as process to service clients
    self.CCAP = CCAP
    self.CFG = CFG

    log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

    # Keep track of the overrides from the command-line, so they can trump app/rel file data
    self.spawn_args = kwargs

    # Greenlet context-local storage
    self.context = LocalContextMixin()

    # Load general capabilities file and augment with specific profile
    self._load_capabilities()

    # Start the capabilities
    start_order = self.cap_profile['start_order']
    for cap in start_order:
        if cap not in self._cap_definitions:
            raise ContainerError("CC capability %s not defined in profile" % cap)
        if cap in self._capabilities or cap in self._cap_instances:
            raise ContainerError("CC capability %s already initialized" % cap)
        try:
            cap_def = self._cap_definitions[cap]
            log.debug("__init__(): Initializing '%s'" % cap)
            cap_obj = named_any(cap_def['class'])(container=self)
            self._cap_instances[cap] = cap_obj
            if 'depends_on' in cap_def and cap_def['depends_on']:
                dep_list = cap_def['depends_on'].split(',')
                for dep in dep_list:
                    dep = dep.strip()
                    if dep not in self._cap_initialized:
                        raise ContainerError("CC capability %s dependent on non-existing capability %s" % (cap, dep))
            if 'field' in cap_def and cap_def['field']:
                setattr(self, cap_def['field'], cap_obj)
            self._cap_initialized.append(cap)
        except Exception as ex:
            log.error("Container Capability %s init error: %s" % (cap, ex))
            raise

    log.debug("Container initialized, OK.")
def on_start(self):
    if not self.CFG.get_safe('system.elasticsearch', False):
        text = 'Can not initialize indexes without ElasticSearch enabled. Please enable system.elasticsearch.'
        log.error(text)
        raise BadRequest(text)

    self.sysname = get_sys_name().lower()

    self.es_host = self.CFG.get_safe('server.elasticsearch.host', 'localhost')
    self.es_port = self.CFG.get_safe('server.elasticsearch.port', '9200')

    self.index_shards = self.CFG.get_safe('server.elasticsearch.shards', 5)
    self.index_replicas = self.CFG.get_safe('server.elasticsearch.replicas', 1)

    self.river_shards = self.CFG.get_safe('server.elasticsearch.river_shards', 5)
    self.river_replicas = self.CFG.get_safe('server.elasticsearch.river_replicas', 1)

    self.es = ep.ElasticSearch(host=self.es_host, port=self.es_port, timeout=10)

    op = self.CFG.get('op', None)

    if op == 'index_bootstrap':
        self.index_bootstrap()
    elif op == 'clean_bootstrap':
        self.clean_bootstrap()
    else:
        raise BadRequest('Operation Unknown')
def listen(lch):
    """
    The purpose of this listen method is to trigger waits in code below.

    By setting up a listener that subscribes to both 3 and 5, and putting received
    messages into the appropriate gevent-queues client side, we can assume that the
    channel we're actually testing with get_stats etc has had the message delivered too.
    """
    lch._queue_auto_delete = False
    lch.setup_listener(NameTrio(bootstrap.get_sys_name(), 'alternate_listener'), 'routed.3')
    lch._bind('routed.5')
    lch.start_consume()

    while True:
        try:
            newchan = lch.accept()
            m, h, d = newchan.recv()
            count = m.rsplit(',', 1)[-1]
            if m.startswith('5,'):
                self.five_events.put(int(count))
                newchan.ack(d)
            elif m.startswith('3,'):
                self.three_events.put(int(count))
                newchan.ack(d)
            else:
                raise StandardError("unknown message: %s" % m)
        except ChannelClosedError:
            break
def launch_benchmark(transform_number=1, primer=1, message_length=4):
    import gevent
    from gevent.greenlet import Greenlet
    from pyon.util.containers import DotDict
    from pyon.net.transport import NameTrio
    from pyon.net.endpoint import Publisher
    import uuid

    num = transform_number
    msg_len = message_length
    transforms = list()
    pids = 1
    TransformBenchTesting.message_length = message_length
    cc = Container.instance
    pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))
    for i in xrange(num):
        tbt = cc.proc_manager._create_service_instance(str(pids), 'tbt', 'prototype.transforms.linear',
                                                       'TransformInPlace',
                                                       DotDict({'process': {'name': 'tbt%d' % pids,
                                                                            'transform_id': pids}}))
        tbt.init()
        tbt.start()
        gevent.sleep(0.2)
        for i in xrange(primer):
            pub.publish(list(xrange(msg_len)))
        g = Greenlet(tbt.perf)
        g.start()
        transforms.append(tbt)
        pids += 1
def on_start(self):
    # print env temporarily to debug cei
    import os
    log.info('ENV vars: %s' % str(os.environ))

    op = self.CFG.get("op", None)
    datastore = self.CFG.get("datastore", None)
    path = self.CFG.get("path", None)
    prefix = self.CFG.get("prefix", get_sys_name()).lower()
    log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))

    self.da = datastore_admin.DatastoreAdmin()

    if op:
        if op == "load":
            self.da.load_datastore(path, datastore, ignore_errors=False)
        elif op == "dump":
            self.da.dump_datastore(path, datastore)
        elif op == "blame":
            # TODO make generic
            self.da.get_blame_objects()
        elif op == "clear":
            self.da.clear_datastore(datastore, prefix)
        else:
            raise iex.BadRequest("Operation unknown")
    else:
        raise iex.BadRequest("No operation specified")
def on_start(self):
    # print env temporarily to debug cei
    import os
    log.info('ENV vars: %s' % str(os.environ))

    op = self.CFG.get("op", None)
    datastore = self.CFG.get("datastore", None)
    path = self.CFG.get("path", None)
    prefix = self.CFG.get("prefix", get_sys_name()).lower()
    log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))

    self.da = datastore_admin.DatastoreAdmin()

    if op:
        if op == "load":
            self.da.load_datastore(path, datastore, ignore_errors=False)
        elif op == "dump":
            self.da.dump_datastore(path, datastore)
        elif op == "dumpres":
            from ion.util.datastore.resources import ResourceRegistryHelper
            rrh = ResourceRegistryHelper()
            rrh.dump_resources_as_xlsx(path)
        elif op == "blame":
            # TODO make generic
            self.da.get_blame_objects()
        elif op == "clear":
            self.da.clear_datastore(datastore, prefix)
        else:
            raise iex.BadRequest("Operation unknown")
    else:
        raise iex.BadRequest("No operation specified")
def test_create_xs(self):
    xs = self.ex_manager.create_xs(sentinel.xs)
    exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

    self.assertEquals(xs._exchange, sentinel.xs)
    self.assertEquals(xs.exchange, exstr)
    self.assertEquals(xs.queue, None)
    self.assertEquals(xs.binding, None)

    self.assertEquals(xs._xs_exchange_type, 'topic')
    self.assertEquals(xs._xs_durable, False)
    self.assertEquals(xs._xs_auto_delete, True)

    # should be in our map too
    self.assertIn(sentinel.xs, self.ex_manager.xs_by_name)
    self.assertEquals(self.ex_manager.xs_by_name[sentinel.xs], xs)

    # should've tried to declare
    self.ex_manager._transport.declare_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr,
                                                                             auto_delete=True, durable=False,
                                                                             exchange_type='topic')
def test_init(self):
    self.assertEquals(self._pub._send_name.exchange, "%s.pyon.events" % bootstrap.get_sys_name())
    self.assertEquals(self._pub._send_name.queue, None)

    pub = EventPublisher(node=self._node, xp=sentinel.xp)
    self.assertEquals(pub._send_name.exchange, sentinel.xp)
    self.assertEquals(pub._send_name.queue, None)
def on_initial_bootstrap(self, process, config, **kwargs):
    if 'test' in get_sys_name():
        # If this is a launch designed for tests, don't launch the QC Post Processor
        return

    if self.process_exists(process, 'qc_post_processor'):
        # Short circuit the bootstrap to make sure not more than one is ever started
        return

    self.scheduler_service = SchedulerServiceProcessClient(process=process)
    self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
    self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

    interval_key = uuid4().hex    # Unique identifier for this process

    config = DotDict()
    config.process.interval_key = interval_key

    process_definition = ProcessDefinition(name='qc_post_processor',
                                           executable={'module': 'ion.processes.data.transforms.qc_post_processing',
                                                       'class': 'QCPostProcessing'})
    process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

    process_id = self.process_dispatcher.create_process(process_definition_id)
    self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)

    timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                                                            end_time='-1',    # Run FOREVER
                                                            interval=3600 * self.run_interval,
                                                            event_origin=interval_key)
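# Note on the timer arithmetic above: run_interval is configured in hours
# (default 24 from CFG), while the interval timer takes seconds, hence
# 3600 * run_interval, e.g. 3600 * 24 == 86400 seconds between runs.
run_interval = 24
assert 3600 * run_interval == 86400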
def __init__(self, *args, **kwargs):
    BaseContainerAgent.__init__(self, *args, **kwargs)

    self._is_started = False
    self._capabilities = []

    # set container id and cc_agent name (as they are set in base class call)
    self.id = get_default_container_id()
    self.name = "cc_agent_%s" % self.id

    Container.instance = self
    from pyon.core import bootstrap
    bootstrap.container_instance = self

    log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

    # DatastoreManager - controls access to Datastores (both mock and couch backed)
    self.datastore_manager = DatastoreManager()
    self.datastore_manager.start()
    self._capabilities.append("DATASTORE_MANAGER")

    # Keep track of the overrides from the command-line, so they can trump app/rel file data
    self.spawn_args = kwargs

    # Instantiate Directory and self-register
    # Has the additional side effect of either bootstrapping the configuration
    # into the directory or reading the configuration based on the value of the
    # auto_bootstrap setting
    self.directory = Directory()

    # Create this Container's specific ExchangeManager instance
    self.ex_manager = ExchangeManager(self)

    # Create this Container's specific ProcManager instance
    self.proc_manager = ProcManager(self)

    # Create this Container's specific AppManager instance
    self.app_manager = AppManager(self)

    # File System - Interface to the OS File System, using correct path names and setups
    self.file_system = FileSystem(CFG)

    # Governance Controller - manages the governance related interceptors
    self.governance_controller = GovernanceController(self)

    # sFlow manager - controls sFlow stat emission
    self.sflow_manager = SFlowManager(self)

    # Coordinates the container start
    self._status = "INIT"

    # protection for when the container itself is used as a Process for clients
    self.container = self

    log.debug("Container initialized, OK.")
def get_datastore(self, ds_name, profile=DataStore.DS_PROFILE.BASIC, config=None):
    """
    Factory method to get a datastore instance from given name, profile and config.
    This is the central point to cache these instances, to decide persistent or mock
    and to force clean the store on first use.

    @param ds_name  Logical name of datastore (will be scoped with sysname)
    @param profile  One of known constants determining the use of the store
    @param config   Override config to use
    """
    assert ds_name, "Must provide ds_name"
    if ds_name in self._datastores:
        log.debug("get_datastore(): Found instance of store '%s'" % ds_name)
        return self._datastores[ds_name]

    scoped_name = ("%s_%s" % (get_sys_name(), ds_name)).lower()

    # Imports here to prevent cyclic module dependency
    from pyon.core.bootstrap import CFG
    config = config or CFG

    persistent = not bool(get_safe(config, "system.mockdb"))
    force_clean = bool(get_safe(config, "system.force_clean"))

    log.info("get_datastore(): Create instance of store '%s' {persistent=%s, force_clean=%s, scoped_name=%s}" % (
        ds_name, persistent, force_clean, scoped_name))

    # Persistent (CouchDB) or MockDB?
    if persistent:
        # Use inline import to prevent circular import dependency
        from pyon.datastore.couchdb.couchdb_datastore import CouchDB_DataStore
        new_ds = CouchDB_DataStore(datastore_name=scoped_name, profile=profile)
    else:
        # Use inline import to prevent circular import dependency
        from pyon.datastore.mockdb.mockdb_datastore import MockDB_DataStore
        new_ds = MockDB_DataStore(datastore_name=scoped_name)  # , profile=profile)

    # Clean the store instance
    if force_clean:
        try:
            new_ds.delete_datastore(scoped_name)
        except NotFound as nf:
            pass

    # Create store if not existing
    if not new_ds.datastore_exists(scoped_name):
        new_ds.create_datastore(scoped_name)

    # Set a few standard datastore instance fields
    new_ds.local_name = ds_name
    new_ds.ds_profile = profile

    self._datastores[ds_name] = new_ds

    return new_ds
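# Standalone sketch (sysname value hypothetical) of the scoping performed above;
# callers pass short logical names, as in Container.start further below:
#   self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)
sys_name = "ion_alice"    # stand-in for get_sys_name()
scoped_name = ("%s_%s" % (sys_name, "resources")).lower()
assert scoped_name == "ion_alice_resources"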
def pre_initialize_ion():
    # Do necessary system initialization
    # Make sure this happens only once
    iadm = InterfaceAdmin(bootstrap.get_sys_name(), config=CFG)
    iadm.create_core_datastores()
    #iadm.store_config(CFG)
    iadm.store_interfaces(idempotent=True)
    iadm.close()
def every_three():
    p = self.container.node.channel(PublisherChannel)
    p._send_name = NameTrio(bootstrap.get_sys_name(), 'routed.3')
    counter = 0

    while not self.publish_three.wait(timeout=3):
        p.send('3,' + str(counter))
        counter += 1
def _clean(cls, config):
    if not cls.root:
        cls.root = os.path.join(config.get_safe('container.filesystem.root', '/tmp/scion'),
                                get_sys_name())
    log.info('Removing %s', cls.root)
    if os.path.exists(cls.root):
        shutil.rmtree(cls.root)
def auto_bootstrap_config(bootstrap_config, system_cfg):
    print "pyon: config: Auto bootstrap CFG into directory"
    from pyon.core.bootstrap import get_sys_name
    from pyon.ion.directory_standalone import DirectoryStandalone
    directory = DirectoryStandalone(sysname=get_sys_name(), config=bootstrap_config)
    de = directory.lookup("/Config/CFG")
    if not de:
        directory.register("/Config", "CFG", **system_cfg.copy())
def every_five():
    p = self.container.node.channel(PublisherChannel)
    p._send_name = NameTrio(bootstrap.get_sys_name(), "routed.5")
    counter = 0

    while not self.publish_five.wait(timeout=5):
        p.send("5," + str(counter))
        counter += 1
def every_three():
    p = self.container.node.channel(PublisherChannel)
    p._send_name = NameTrio(bootstrap.get_sys_name(), 'routed.3')
    counter = 0

    while not self.publish_three.wait(timeout=3):
        p.send('3,' + str(counter))
        counter += 1
def __init__(self, *args, **kwargs):
    BasePubsubManagementService.__init__(self, *args, **kwargs)

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def __init__(self, container=None):
    self.container = container or bootstrap.container_instance
    self.sysname = get_sys_name()

    self._resources = {}
    self._associations = {}
    self._assoc_by_sub = {}
    self._directory = {}
    self._res_by_type = {}
    self._attr_by_type = {}
def test_create_xs_with_params(self):
    xs = self.ex_manager.create_xs(sentinel.xs, exchange_type=sentinel.ex_type, durable=True)
    exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

    self.assertEquals(xs._xs_durable, True)
    self.assertEquals(xs._xs_exchange_type, sentinel.ex_type)

    # declaration?
    self.pt.declare_exchange_impl.assert_called_with(exstr, auto_delete=True, durable=True,
                                                     exchange_type=sentinel.ex_type)
def __init__(self, config=None, sysname=None):
    if not config:
        from pyon.core.bootstrap import CFG
        config = CFG
    self.config = config
    if not sysname:
        from pyon.core.bootstrap import get_sys_name
        sysname = get_sys_name()
    self.sysname = sysname
def test_create_xs_with_params(self):
    xs = self.ex_manager.create_xs(sentinel.xs, exchange_type=sentinel.ex_type, durable=True)
    exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

    self.assertEquals(xs._xs_durable, True)
    self.assertEquals(xs._xs_exchange_type, sentinel.ex_type)

    # declaration?
    self.ex_manager._transport.declare_exchange_impl.assert_called_with(self.ex_manager._client, exstr,
                                                                        auto_delete=True, durable=True,
                                                                        exchange_type=sentinel.ex_type)
def start(self):
    log.debug("Container starting...")

    # Check if this UNIX process already runs a Container.
    self.pidfile = "cc-pid-%d" % os.getpid()
    if os.path.exists(self.pidfile):
        raise Exception("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

    # write out a PID file containing our agent messaging name
    with open(self.pidfile, 'w') as f:
        from pyon.core.bootstrap import get_sys_name
        pid_contents = {'messaging': dict(CFG.server.amqp),
                        'container-agent': self.name,
                        'container-xp': get_sys_name()}
        f.write(msgpack.dumps(pid_contents))
        atexit.register(self._cleanup_pid)

    # set up abnormal termination handler for this container
    def handl(signum, frame):
        try:
            self._cleanup_pid()     # cleanup the pidfile first
            self.quit()             # now try to quit - will not error on second cleanup pidfile call
        finally:
            signal.signal(signal.SIGTERM, self._normal_signal)
            os.kill(os.getpid(), signal.SIGTERM)
    self._normal_signal = signal.signal(signal.SIGTERM, handl)

    self.datastore_manager.start()

    # Instantiate Directory and self-register
    self.directory = Directory(self.datastore_manager)
    self.directory.register("/Containers", self.id, cc_agent=self.name)

    # Create other repositories to make sure they are there and clean if needed
    self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)
    self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)
    self.state_repository = StateRepository(self.datastore_manager)
    self.event_repository = EventRepository(self.datastore_manager)

    # Start ExchangeManager. In particular establish broker connection
    self.ex_manager.start()

    # TODO: Move this in ExchangeManager - but there is an error
    self.node, self.ioloop = messaging.make_node()  # TODO: shortcut hack

    self.proc_manager.start()

    self.app_manager.start()

    # Start the CC-Agent API
    rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

    # Start an ION process with the right kind of endpoint factory
    proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
    self.proc_manager.proc_sup.ensure_ready(proc)

    log.info("Container started, OK.")
def setUp(self):
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def auto_bootstrap_interfaces(bootstrap_config):
    print "pyon: config: Auto bootstrap interfaces into directory"
    from pyon.core.bootstrap import get_sys_name
    from pyon.ion.directory_standalone import DirectoryStandalone
    directory = DirectoryStandalone(sysname=get_sys_name(), config=bootstrap_config)
    de = directory.lookup("/ServiceInterfaces")
    if not de:
        _bootstrap_object_defs(directory)
        _bootstrap_service_defs(directory)
def __init__(self, *args, **kwargs):
    BaseContainerAgent.__init__(self, *args, **kwargs)

    self._is_started = False

    # set id and name (as they are set in base class call)
    self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
    self.name = "cc_agent_%s" % self.id

    Container.instance = self

    # TODO: Bug: Replacing the CFG instance does not work because references are already public. Update directly
    dict_merge(CFG, kwargs, inplace=True)
    from pyon.core import bootstrap
    bootstrap.container_instance = self
    bootstrap.assert_configuration(CFG)
    log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

    # Keep track of the overrides from the command-line, so they can trump app/rel file data
    self.spawn_args = kwargs

    # Load object and service registry etc.
    bootstrap_pyon()

    # Create this Container's specific ExchangeManager instance
    self.ex_manager = ExchangeManager(self)

    # Create this Container's specific ProcManager instance
    self.proc_manager = ProcManager(self)

    # Create this Container's specific AppManager instance
    self.app_manager = AppManager(self)

    # DatastoreManager - controls access to Datastores (both mock and couch backed)
    self.datastore_manager = DatastoreManager()

    # File System - Interface to the OS File System, using correct path names and setups
    self.file_system = FileSystem(CFG)

    # Governance Controller - manages the governance related interceptors
    self.governance_controller = GovernanceController(self)

    # sFlow manager - controls sFlow stat emission
    self.sflow_manager = SFlowManager(self)

    # Coordinates the container start
    self._is_started = False
    self._capabilities = []
    self._status = "INIT"

    # protection for when the container itself is used as a Process for clients
    self.container = self

    log.debug("Container initialized, OK.")
def __init__(self, *args, **kwargs):
    BaseContainerAgent.__init__(self, *args, **kwargs)

    # Coordinates the container start
    self._status = INIT

    self._is_started = False

    # set container id and cc_agent name (as they are set in base class call)
    self.id = get_default_container_id()
    self.name = "cc_agent_%s" % self.id

    bootstrap.container_instance = self
    Container.instance = self

    self.container = self    # Make self appear as process to service clients
    self.CCAP = CCAP
    self.CFG = CFG

    log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

    # Keep track of the overrides from the command-line, so they can trump app/rel file data
    self.spawn_args = kwargs

    # Greenlet context-local storage
    self.context = LocalContextMixin()

    # Load general capabilities file and augment with specific profile
    self._load_capabilities()

    # Start the capabilities
    start_order = self.cap_profile['start_order']
    for cap in start_order:
        if cap not in self._cap_definitions:
            raise ContainerError("CC capability %s not defined in profile" % cap)
        if cap in self._capabilities or cap in self._cap_instances:
            raise ContainerError("CC capability %s already initialized" % cap)
        try:
            cap_def = self._cap_definitions[cap]
            log.debug("__init__(): Initializing '%s'" % cap)
            cap_obj = named_any(cap_def['class'])(container=self)
            self._cap_instances[cap] = cap_obj
            if 'depends_on' in cap_def and cap_def['depends_on']:
                dep_list = cap_def['depends_on'].split(',')
                for dep in dep_list:
                    dep = dep.strip()
                    if dep not in self._cap_initialized:
                        raise ContainerError("CC capability %s dependent on non-existing capability %s" % (cap, dep))
            if 'field' in cap_def and cap_def['field']:
                setattr(self, cap_def['field'], cap_obj)
            self._cap_initialized.append(cap)
        except Exception as ex:
            log.error("Container Capability %s init error: %s" % (cap, ex))
            raise

    log.debug("Container initialized, OK.")
def __init__(self, node=None, to_name=None, name=None):
    BaseEndpoint.__init__(self, node=node)

    if name:
        log.warn("SendingBaseEndpoint: name param is deprecated, please use to_name instead")
    self._send_name = to_name or name

    # ensure NameTrio
    if not isinstance(self._send_name, NameTrio):
        self._send_name = NameTrio(bootstrap.get_sys_name(), self._send_name)   # if send_name is a tuple it takes precedence
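# Usage sketch for the coercion above (assumes a pyon environment; NameTrio is
# pyon.net.transport.NameTrio as used throughout these snippets):
#
#   ep = SendingBaseEndpoint(node=node, to_name="some_queue")
#   # ep._send_name is now NameTrio(get_sys_name(), "some_queue"): a bare string
#   # is scoped into the sysname exchange, while a NameTrio (or tuple wrapped by
#   # it) passed as to_name is used as-is.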
def setUp(self):
    self._start_container()

    # skip if we're not an amqp node
    if not isinstance(self.container.ex_manager._nodes.get('priviledged', self.container.ex_manager._nodes.values()[0]), NodeB):
        raise unittest.SkipTest("Management API only works with AMQP nodes for now")

    self.transport = self.container.ex_manager.get_transport(
        self.container.ex_manager._nodes.get('priviledged', self.container.ex_manager._nodes.values()[0]))

    # test to see if we have access to management URL!
    url = self.container.ex_manager._get_management_url('overview')
    try:
        self.container.ex_manager._make_management_call(url, use_ems=False)
    except exception.IonException as ex:
        raise unittest.SkipTest("Cannot find management API: %s" % str(ex))

    self.ex_name = ".".join([get_sys_name(), "ex", str(uuid4())[0:6]])
    self.queue_name = ".".join([get_sys_name(), "q", str(uuid4())[0:6]])
    self.bind_name = str(uuid4())[0:6]
def apply_remote_config(system_cfg):
    from pyon.core.bootstrap import get_sys_name
    from pyon.core.exception import Conflict
    from pyon.ion.directory_standalone import DirectoryStandalone
    directory = DirectoryStandalone(sysname=get_sys_name(), config=system_cfg)

    de = directory.lookup("/Config/CFG")
    if not de:
        raise Conflict("Expected /Config/CFG in directory. Correct Org??")
    apply_configuration(system_cfg, de)
def on_start(self):
    log.debug("StreamBinder start")
    queue_name = self.CFG.get('args', {}).get('queue_name', None)
    binding = self.CFG.get('args', {}).get('binding', None)

    # Create scoped exchange name
    XP = '.'.join([bootstrap.get_sys_name(), 'science_data'])

    self.channel = self.container.node.channel(BindingChannel)
    self.channel.setup_listener(NameTrio(XP, queue_name), binding=binding)
def __init__(self, router=None):
    BaseNode.__init__(self)

    self._own_router = True
    if router is not None:
        self._local_router = router
        self._own_router = False
    else:
        self._local_router = LocalRouter(get_sys_name())
    self._channel_id_pool = IDPool()
def setUp(self):
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2cei.yml')
    #self.pd_cli = ProcessDispatcherServiceClient(node=self.container.node)
    self.pd_cli = ProcessDispatcherServiceClient(to_name="process_dispatcher")

    self.process_definition_id = uuid4().hex
    self.process_definition_name = 'test'
    self.process_definition = ProcessDefinition(name=self.process_definition_name,
                                                executable={'module': 'ion.agents.cei.test.test_haagent',
                                                            'class': 'TestProcess'})
    self.pd_cli.create_process_definition(self.process_definition, self.process_definition_id)

    self.resource_id = "haagent_1234"
    self._haa_name = "high_availability_agent"
    self._haa_dashi_name = "dashi_haa_" + uuid4().hex
    self._haa_dashi_uri = get_dashi_uri_from_cfg()
    self._haa_dashi_exchange = "%s.hatests" % bootstrap.get_sys_name()
    self._haa_config = {
        'highavailability': {
            'policy': {
                'interval': 1,
                'name': 'npreserving',
                'parameters': {
                    'preserve_n': 0
                }
            },
            'process_definition_id': self.process_definition_id,
            'dashi_messaging': True,
            'dashi_exchange': self._haa_dashi_exchange,
            'dashi_name': self._haa_dashi_name
        },
        'agent': {'resource_id': self.resource_id},
    }

    self._base_services, _ = self.container.resource_registry.find_resources(
        restype="Service", name=self.process_definition_name)

    self._base_procs = self.pd_cli.list_processes()

    self.waiter = ProcessStateWaiter()
    self.waiter.start()

    self.container_client = ContainerAgentClient(node=self.container.node, name=self.container.name)
    self._haa_pid = self.container_client.spawn_process(name=self._haa_name,
                                                        module="ion.agents.cei.high_availability_agent",
                                                        cls="HighAvailabilityAgent",
                                                        config=self._haa_config)

    # Start a resource agent client to talk with the instrument agent.
    self._haa_pyon_client = SimpleResourceAgentClient(self.resource_id, process=FakeProcess())
    log.info('Got haa client %s.', str(self._haa_pyon_client))

    self.haa_client = HighAvailabilityAgentClient(self._haa_pyon_client)
def setUp(self):
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2dm.yml')

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def __init__(self, *args, **kwargs):
    BasePubsubManagementService.__init__(self, *args, **kwargs)

    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([bootstrap.get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
def get_datastore_instance(cls, ds_name, profile=None):
    profile = profile or DataStore.DS_PROFILE_MAPPING.get(ds_name, DataStore.DS_PROFILE.BASIC)
    new_ds = DatastoreFactory.get_datastore(datastore_name=ds_name, profile=profile,
                                            scope=get_sys_name(), config=CFG,
                                            variant=DatastoreFactory.DS_FULL)
    return new_ds
def _force_clean(cls, recreate=False):
    from pyon.core.bootstrap import get_sys_name, CFG
    from pyon.datastore.datastore_common import DatastoreFactory
    datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE,
                                               scope=get_sys_name())
    #datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE)

    dbs = datastore.list_datastores()
    things_to_clean = filter(lambda x: x.startswith('%s_' % get_sys_name().lower()), dbs)
    try:
        for thing in things_to_clean:
            datastore.delete_datastore(datastore_name=thing)
            if recreate:
                datastore.create_datastore(datastore_name=thing)
    finally:
        datastore.close()

    if os.environ.get('CEI_LAUNCH_TEST', None) is None:
        FileSystem._clean(CFG)
def test_create_xp_with_different_xs(self):
    xs = self.ex_manager.create_xs(sentinel.xs)
    xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))  # what we expect the exchange property to return

    xp = self.ex_manager.create_xp(sentinel.xp, xs)
    xp_exstr = '%s.xp.%s' % (xs_exstr, str(sentinel.xp))

    # check mappings
    self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)
    self.assertIn(xp, self.ex_manager.xn_by_xs[sentinel.xs])

    self.assertEquals(xp.exchange, xp_exstr)
def test_delete_xp(self):
    xp = self.ex_manager.create_xp(sentinel.xp)
    exstr = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

    self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)

    self.ex_manager.delete_xp(xp)

    self.assertNotIn(sentinel.xp, self.ex_manager.xn_by_name)

    # deletion
    self.ex_manager._transport.delete_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr)
def test_create_xn_with_different_xs(self):
    xs = self.ex_manager.create_xs(sentinel.xs)
    xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))  # what we expect the exchange property to return

    xn = self.ex_manager.create_xn_service('servicename', xs)
    qstr = '%s.%s' % (xn.exchange, 'servicename')                   # what we expect the queue name to look like

    # check mappings
    self.assertIn('servicename', self.ex_manager.xn_by_name)
    self.assertIn(xn, self.ex_manager.xn_by_xs[sentinel.xs])

    self.assertEquals(xn.queue, qstr)
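# The exchange naming convention exercised by the tests above, as a standalone
# sketch (sysname value hypothetical): exchange spaces are scoped as
# "<sysname>.ion.xs.<xs>", and exchange points nest under them as ".xp.<xp>".
sys_name = "ion_test"
xs_exchange = '%s.ion.xs.%s' % (sys_name, 'myxs')     # exchange space
xp_exchange = '%s.xp.%s' % (xs_exchange, 'myxp')      # exchange point within it
assert xp_exchange == "ion_test.ion.xs.myxs.xp.myxp"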