def __init__(self):
    """Bootstrap pyon and acquire the 'coverage' and 'coverage_spans' datastores.

    Caches the backing table names for both stores.

    @raises RuntimeError if either datastore cannot be loaded.
    """
    bootstrap_pyon()
    dsm = DatastoreManager()

    self.datastore = dsm.get_datastore(ds_name='coverage')
    if self.datastore is None:
        raise RuntimeError("Unable to load datastore for coverage")
    else:
        self.entity_table_name = self.datastore._get_datastore_name()
        # Use lazy logger args (consistent with the span_store trace below) so the
        # message is only formatted when TRACE is enabled.
        log.trace("Got datastore: %s type %s",
                  self.datastore._get_datastore_name(), type(self.datastore))

    self.span_store = dsm.get_datastore(ds_name='coverage_spans')
    if self.span_store is None:
        raise RuntimeError("Unable to load datastore for coverage_spans")
    else:
        self.span_table_name = self.span_store._get_datastore_name()
        log.trace("Got datastore: %s type %s",
                  self.span_store._get_datastore_name(), type(self.span_store))
def test_event_repo(self):
    """Round-trip a single event, then verify find_events filtering options."""
    dsm = DatastoreManager()
    ds = dsm.get_datastore("events")
    ds.delete_datastore()
    ds.create_datastore()

    event_repo = EventRepository(dsm)
    event_repo1 = EventRepository(dsm)

    # Single event: put then get by id.
    first_event = Event(origin="resource1")
    event_id, _ = event_repo.put_event(first_event)
    read_back = event_repo.get_event(event_id)
    self.assertEquals(first_event.origin, read_back.origin)

    # Five events with consecutive (string) timestamps under one origin.
    base_ts = 1328680477138
    stored_pairs = []
    for offset in xrange(5):
        ev = Event(origin="resource2", ts_created=str(base_ts + offset))
        event_id, _ = event_repo.put_event(ev)
        stored_pairs.append((ev, event_id))

    # Origin filter, ordering and limit.
    self.assertEquals(len(event_repo.find_events(origin='resource2')), 5)
    self.assertEquals(len(event_repo.find_events(origin='resource2', descending=True)), 5)
    self.assertEquals(len(event_repo.find_events(origin='resource2', limit=3)), 3)

    # Timestamp range filters, with and without origin.
    self.assertEquals(len(event_repo.find_events(origin='resource2', start_ts=str(base_ts + 3))), 2)
    self.assertEquals(len(event_repo.find_events(origin='resource2', end_ts=str(base_ts + 2))), 3)
    self.assertEquals(len(event_repo.find_events(origin='resource2',
                                                 start_ts=str(base_ts + 3),
                                                 end_ts=str(base_ts + 4))), 2)
    self.assertEquals(len(event_repo.find_events(start_ts=str(base_ts + 3),
                                                 end_ts=str(base_ts + 4))), 2)

    # Lookup by event type.
    typed_event = ResourceLifecycleEvent(origin="resource3")
    event_id, _ = event_repo.put_event(typed_event)
    self.assertEquals(len(event_repo.find_events(event_type="ResourceLifecycleEvent")), 1)
def test_conv_repo(self):
    """Persist one conversation message and read it back from the store."""
    dsm = DatastoreManager()
    ds = dsm.get_datastore("conversations")
    ds.delete_datastore()
    ds.create_datastore()

    conv_repo = ConvRepository(dsm)

    outgoing = ConversationMessage(sender='sender',
                                   recipient='receiver',
                                   conversation_id='1',
                                   protocol='rpc',
                                   headers={'nofield': 'novalue'})
    conv_id, _ = conv_repo.put_conv(outgoing)

    stored = conv_repo.conv_store.read(conv_id)
    self.assertEquals(outgoing.sender, stored.sender)
def test_event_repo(self):
    """Round-trip events through EventRepository and check find_events filters."""
    # NOTE(review): a method with this exact name also appears earlier in this
    # file; if both live in the same TestCase class the later definition shadows
    # the earlier one -- confirm they belong to different classes/modules.
    dsm = DatastoreManager()
    ds = dsm.get_datastore("events")
    ds.delete_datastore()
    ds.create_datastore()

    event_repo = EventRepository(dsm)
    event_repo1 = EventRepository(dsm)  # second repo over the same datastore manager (unused below)

    # Single event round-trip: put, then get by returned id.
    event1 = Event(origin="resource1")
    event_id, _ = event_repo.put_event(event1)
    event1r = event_repo.get_event(event_id)
    self.assertEquals(event1.origin, event1r.origin)

    # Five events under one origin with consecutive ts_created values (strings).
    ts = 1328680477138
    events2 = []
    for i in xrange(5):
        ev = Event(origin="resource2", ts_created=str(ts + i))
        event_id, _ = event_repo.put_event(ev)
        events2.append((ev, event_id))

    # Filter by origin only.
    events_r = event_repo.find_events(origin='resource2')
    self.assertEquals(len(events_r), 5)

    # Same filter, reversed ordering -- count is unchanged.
    events_r = event_repo.find_events(origin='resource2', descending=True)
    self.assertEquals(len(events_r), 5)

    # Limit caps the result count.
    events_r = event_repo.find_events(origin='resource2', limit=3)
    self.assertEquals(len(events_r), 3)

    # Timestamp range boundaries (start_ts / end_ts appear to be inclusive
    # given the expected counts -- confirm against EventRepository).
    events_r = event_repo.find_events(origin='resource2', start_ts=str(ts + 3))
    self.assertEquals(len(events_r), 2)

    events_r = event_repo.find_events(origin='resource2', end_ts=str(ts + 2))
    self.assertEquals(len(events_r), 3)

    events_r = event_repo.find_events(origin='resource2', start_ts=str(ts + 3), end_ts=str(ts + 4))
    self.assertEquals(len(events_r), 2)

    # Range filter without an origin.
    events_r = event_repo.find_events(start_ts=str(ts + 3), end_ts=str(ts + 4))
    self.assertEquals(len(events_r), 2)

    # Lookup by event type.
    event3 = ResourceLifecycleEvent(origin="resource3")
    event_id, _ = event_repo.put_event(event3)
    events_r = event_repo.find_events(event_type="ResourceLifecycleEvent")
    self.assertEquals(len(events_r), 1)
class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD methods for notifications.
    """

    def on_start(self):
        """Set up repositories, the email event processor and service clients."""
        # Get the event Repository
        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        # Create an event processor that delivers notifications by email
        self.event_processor = EmailEventProcessor(self.smtp_client)

        # load event originators, types, and table
        self.event_types = CFG.event.types
        self.event_table = {}

        # Get the clients
        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.datastore_manager = DatastoreManager()

        self.event_publisher = EventPublisher()
        self.scheduler_service = SchedulerService()

    def on_quit(self):
        # No teardown performed on shutdown.
        pass

    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified Origin id.
        Associate the Notification resource with the user_id string.
        returned id is the internal id by which NotificationRequest will be identified
        in the data store.

        @param notification NotificationRequest
        @param user_id str
        @retval notification_id str
        @throws BadRequest if object passed has _id or _rev attribute
        """
        if not user_id:
            raise BadRequest("User id not provided.")

        # Persist Notification object as a resource if it has already not been persisted:
        # find all notifications in the system
        notifs, _ = self.clients.resource_registry.find_resources(restype = RT.NotificationRequest)

        # if the notification has already been registered, simply use the old id
        if notification in notifs:
            log.warning("Notification object has already been created in resource registry before for another user. No new id to be generated.")
            notification_id = notification._id
        else:
            # since the notification has not been registered yet, register it and get the id
            notification_id, _ = self.clients.resource_registry.create(notification)

        # read the registered notification request object because this has an _id and is more useful
        notification = self.clients.resource_registry.read(notification_id)

        # Create an event processor for user. This sets up callbacks etc.
        # As a side effect this updates the UserInfo object and also the user info
        # and reverse user info dictionaries.
        user = self.event_processor.add_notification_for_user(notification_request=notification, user_id=user_id)

        # Allow the indexes to be updated for ElasticSearch.
        # We publish the event only after this so that the reload of the user info
        # by the notification workers works properly.
        # todo: This is to allow time for the indexes to be created before publishing
        #       ReloadUserInfoEvent for notification workers.
        # todo: When things are more refined, it will be nice to have an event generated
        #       when the indexes are updated so that a subscriber here, when it receives
        #       that event, will publish the reload user info event.
        time.sleep(4)

        # Generate an event that can be picked by a notification worker so that it can
        # update its user_info dictionary
        log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s" % notification_id)

        self.event_publisher.publish_event(
            event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been created.",
            notification_id = notification_id)

        return notification_id

    def update_notification(self, notification=None, user_id = ''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification NotificationRequest
        @throws BadRequest if object does not have _id or _rev attribute
        @throws NotFound object with specified id does not exist
        @throws Conflict object not based on latest persisted object version
        """
        # Get the old notification (read before update, for diffing user info below)
        old_notification = self.clients.resource_registry.read(notification._id)

        # Update the notification
        self.clients.resource_registry.update(notification)

        # re-read the notification object to make sure we have the newly registered version
        notification_id = notification._id
        notification = self.clients.resource_registry.read(notification_id)

        # Update the UserInfo object
        user = self.update_user_info_object(user_id, notification, old_notification)

        # Update the user_info dictionary maintained by UNS
        self.update_user_info_dictionary(user, notification, old_notification)

        # Generate an event that can be picked by notification workers so that they
        # can update their user_info dictionary
        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")

        self.event_publisher.publish_event(
            event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been updated."
        )

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest objects.

        @param notification_id str
        @retval notification NotificationRequest
        @throws NotFound object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id str
        @throws NotFound object with specified id does not exist
        """
        # Stop the event subscriber for the notification
        notification_request = self.clients.resource_registry.read(notification_id)

        self.event_processor.stop_notification_subscriber(notification_request=notification_request)

        # delete the notification from the user_info and reverse_user_info dictionaries
        self.delete_notification_from_user_info(notification_id)

        # delete from the resource registry
        self.clients.resource_registry.delete(notification_id)

        # Generate an event that can be picked by a notification worker so that it
        # can update its user_info dictionary
        log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s" % notification_id)

        self.event_publisher.publish_event(
            event_type= "ReloadUserInfoEvent",
            origin="UserNotificationService",
            description= "A notification has been deleted.",
            notification_id = notification_id)

    def delete_notification_from_user_info(self, notification_id):
        '''
        Helper method to delete the notification from the user_info dictionary
        '''
        for user_name, value in self.event_processor.user_info.iteritems():
            for notif in value['notifications']:
                if notification_id == notif._id:
                    # remove the notification
                    # NOTE(review): this mutates value['notifications'] while iterating
                    # over it, which can skip elements -- confirm at most one match is
                    # possible per user.
                    value['notifications'].remove(notif)

                    # remove the notification_subscription
                    self.event_processor.user_info[user_name]['notification_subscriptions'].pop(notification_id)

        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)

    def find_events(self, origin='', type='', min_datetime='', max_datetime='', limit=-1, descending=False):
        """Returns a list of events that match the specified search criteria. Will
        throw a not NotFound exception if no events exist for the given parameters.

        @param origin str
        @param type str
        @param min_datetime str
        @param max_datetime str
        @param limit int (integer limiting the number of results (0 means unlimited))
        @param descending boolean (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list []
        @throws NotFound object with specified parameters does not exist
        @throws NotFound object with specified parameters does not exist
        """
        # Build the three discovery query clauses; '*' wildcards any unset criterion.
        if min_datetime and max_datetime:
            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (min_datetime, max_datetime)
        else:
            search_time = 'search "ts_created" is "*" from "events_index"'

        if origin:
            search_origin = 'search "origin" is "%s" from "events_index"' % origin
        else:
            search_origin = 'search "origin" is "*" from "events_index"'

        if type:
            search_type = 'search "type_" is "%s" from "events_index"' % type
        else:
            search_type = 'search "type_" is "*" from "events_index"'

        search_string = search_time + ' and ' + search_origin + ' and ' + search_type

        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
        log.debug("(find_events) Discovery search returned the following event ids: %s" % ret_vals)

        # Read each event object from the events datastore by id.
        events = []
        for event_id in ret_vals:
            datastore = self.datastore_manager.get_datastore('events')
            event_obj = datastore.read(event_id)
            events.append(event_obj)

        log.debug("(find_events) UNS found the following relevant events: %s" % events)

        # NOTE(review): 'list' shadows the builtin, and events[i] raises IndexError
        # when limit exceeds the number of results -- presumably limit <= len(events)
        # is guaranteed by the query; confirm.
        if limit > -1:
            list = []
            for i in xrange(limit):
                list.append(events[i])
            return list

        #todo implement time ordering: ascending or descending
        return events

    def publish_event(self, event=None, scheduler_entry= None):
        '''
        Publish a general event at a certain time using the UNS

        @param event Event
        @param scheduler_entry SchedulerEntry This object is created through Scheduler Service
        '''
        log.debug("UNS to publish on schedule the event: %s" % event)

        # Set up a subscriber to get the nod from the scheduler to publish the event
        def publish(message, headers):
            # Closure over 'event': republish it when the scheduler's timer fires.
            self.event_publisher._publish_event( event_msg = event,
                origin=event.origin,
                event_type = event.type_)
            log.info("UNS published an event in response to a nod from the Scheduler Service.")

        event_subscriber = EventSubscriber( event_type = "ResourceEvent", callback=publish)
        event_subscriber.start()

        # Use the scheduler to set up a timer
        self.scheduler_service.create_timer(scheduler_entry)

    def create_worker(self, number_of_workers=1):
        '''
        Creates notification workers

        @param number_of_workers int
        @ret_val pids list
        '''
        pids = []

        for n in xrange(number_of_workers):
            process_definition = ProcessDefinition( name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class':'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

            # Process Spawning
            pid2 = self.process_dispatcher.create_process(process_definition_id)

            #@todo put in a configuration
            configuration = {}
            configuration['process'] = dict({
                'name': 'notification_worker_%s' % n,
                'type':'simple'
            })

            pid = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration = configuration,
                process_id=pid2
            )
            pids.append(pid)

        return pids

    def process_batch(self, start_time = 0, end_time = 10):
        '''
        This method is launched when an process_batch event is received. The user
        info dictionary maintained by the User Notification Service is used to query
        the event repository for all events for a particular user that have occurred
        in a provided time interval, and then an email is sent to the user containing
        the digest of all the events.
        '''
        for user_name, value in self.event_processor.user_info.iteritems():

            notifications = value['notifications']
            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (start_time, end_time)

            for notification in notifications:
                # Build one discovery query per notification; '*' wildcards unset fields.
                if notification.origin:
                    search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                else:
                    search_origin = 'search "origin" is "*" from "events_index"'

                if notification.origin_type:
                    search_origin_type= 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                else:
                    search_origin_type= 'search "origin_type" is "*" from "events_index"'

                if notification.event_type:
                    search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                else:
                    search_event_type = 'search "type_" is "*" from "events_index"'

                search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type

                # get the list of ids corresponding to the events
                ret_vals = self.discovery.parse(search_string)

                for event_id in ret_vals:
                    datastore = self.datastore_manager.get_datastore('events')
                    event_obj = datastore.read(event_id)
                    events_for_message.append(event_obj)

            log.debug("Found following events of interest to user, %s: %s" % (user_name, events_for_message))

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(events_for_message, user_name)

    def format_and_send_email(self, events_for_message, user_name):
        '''
        Format the message for a particular user containing information about the
        events he is to be notified about
        '''
        message = str(events_for_message)
        log.info("The user, %s, will get the following events in his batch notification email: %s" % (user_name, message))

        msg_body = ''
        count = 1
        for event in events_for_message:
            # build the email from the event content
            msg_body += string.join(("\r\n",
                                     "Event %s: %s" % (count, event),
                                     "",
                                     "Originator: %s" % event.origin,
                                     "",
                                     "Description: %s" % event.description ,
                                     "",
                                     "Event time stamp: %s" % event.ts_created,
                                     "\r\n",
                                     "------------------------"
                                     "\r\n"))
            count += 1

        msg_body += "You received this notification from ION because you asked to be " + \
                    "notified about this event from this source. " + \
                    "To modify or remove notifications about this event, " + \
                    "please access My Notifications Settings in the ION Web UI. " + \
                    "Do not reply to this email. This email address is not monitored " + \
                    "and the emails will not be read. \r\n "

        log.debug("The email has the following message body: %s" % msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(  msg_body = msg_body,
                                msg_subject = msg_subject,
                                msg_recipient=self.event_processor.user_info[user_name]['user_contact'].email,
                                smtp_client=self.smtp_client )

    def send_batch_email(self, msg_body, msg_subject, msg_recipient, smtp_client):
        '''
        Send the email
        '''
        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        msg['To'] = msg_recipient
        log.debug("EventProcessor.subscription_callback(): sending email to %s"\
                  %msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, msg_recipient, msg.as_string())

    def update_user_info_object(self, user_id, new_notification, old_notification):
        '''
        Update the UserInfo object. If the passed in parameter, old_notification,
        is None, it does not need to remove the old notification
        '''
        # read the user
        user = self.clients.resource_registry.read(user_id)

        if not user:
            raise BadRequest("No user with the provided user_id: %s" % user_id)

        # Swap the old notification for the new one in the user's 'notifications' variable.
        notifications = []
        for item in user.variables:
            if item['name'] == 'notifications':
                if old_notification and old_notification in item['value']:
                    notifications = item['value']
                    # remove the old notification
                    notifications.remove(old_notification)
                # put in the new notification
                notifications.append(new_notification)
                item['value'] = notifications
                break

        # update the resource registry
        self.clients.resource_registry.update(user)

        return user

    def update_user_info_dictionary(self, user, new_notification, old_notification):
        """Replace old_notification with new_notification in the in-memory user_info
        dictionary and rebuild the reverse lookup."""
        # Remove the old notifications
        if old_notification in self.event_processor.user_info[user.name]['notifications']:
            # remove from notifications list
            self.event_processor.user_info[user.name]['notifications'].remove(old_notification)

        # update the notification subscription object:
        # get the old notification_subscription
        notification_subscription = self.event_processor.user_info[user.name]['notification_subscriptions'].pop(old_notification._id)

        # update that old notification subscription
        notification_subscription._res_obj = new_notification

        # feed the updated notification subscription back into the user info dictionary
        self.event_processor.user_info[user.name]['notification_subscriptions'][old_notification._id] = notification_subscription

        # find the already existing notifications for the user
        notifications = self.event_processor.user_info[user.name]['notifications']
        notifications.append(new_notification)

        # update the user info - contact information, notifications
        self.event_processor.user_info[user.name]['user_contact'] = user.contact
        self.event_processor.user_info[user.name]['notifications'] = notifications

        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)
def test_directory(self):
    """Exercise Directory: register/lookup/unregister plus find_* queries."""
    dsm = DatastoreManager()
    ds = dsm.get_datastore("resources")
    ds.delete_datastore()
    ds.create_datastore()

    directory = Directory(datastore_manager=dsm)
    directory.start()

    #self.addCleanup(directory.dir_store.delete_datastore)

    # Starting the directory installs its design document.
    objs = directory.dir_store.list_objects()
    self.assert_("_design/directory" in objs)

    root = directory.lookup("/DIR")
    self.assert_(root is not None)

    # Unregistered path resolves to None.
    entry = directory.lookup("/temp")
    self.assert_(entry is None)

    entry_old = directory.register("/","temp")
    self.assertEquals(entry_old, None)

    # Create a node: a fresh node looks up as an empty dict.
    entry = directory.lookup("/temp")
    self.assertEquals(entry, {} )

    # The create case: register returns the previous value (None on create).
    entry_old = directory.register("/temp", "entry1", foo="awesome")
    self.assertEquals(entry_old, None)
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, {"foo":"awesome"})

    # The update case: register returns the replaced attributes.
    entry_old = directory.register("/temp", "entry1", foo="ingenious")
    self.assertEquals(entry_old, {"foo":"awesome"})

    # The delete case: unregister returns the removed attributes.
    entry_old = directory.unregister("/temp", "entry1")
    self.assertEquals(entry_old, {"foo":"ingenious"})
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, None)

    # Build a small tree for the find_* queries below.
    # Note "rid7" is registered twice (/BranchB/l and /BranchB/k/m) and key "X"
    # appears twice (/BranchA/X and /BranchB/k/X).
    directory.register("/BranchA", "X", resource_id="rid1")
    directory.register("/BranchA", "Y", resource_id="rid2")
    directory.register("/BranchA", "Z", resource_id="rid3")
    directory.register("/BranchA/X", "a", resource_id="rid4")
    directory.register("/BranchA/X", "b", resource_id="rid5")
    directory.register("/BranchB", "k", resource_id="rid6")
    directory.register("/BranchB", "l", resource_id="rid7")
    directory.register("/BranchB/k", "m", resource_id="rid7")
    directory.register("/BranchB/k", "X")

    # find_by_value searches by attribute beneath a parent path.
    res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
    self.assertEquals(len(res_list), 1)
    self.assertEquals(res_list[0].org, "ION")
    self.assertEquals(res_list[0].parent, "/BranchA")
    self.assertEquals(res_list[0].key, "Z")

    # Unknown value yields nothing.
    res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    # "/Branch" still matches both rid7 entries -- the parent argument is
    # presumably a prefix match, not an exact path; confirm against Directory.
    res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 1)

    # find_child_entries: direct children by default, full subtree with direct_only=False.
    res_list = directory.find_child_entries("/BranchB/k/m")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB", direct_only=False)
    self.assertEquals(len(res_list), 4)

    # find_by_key: by key alone, then restricted to a parent.
    res_list = directory.find_by_key("X")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_key("X", parent="/BranchB")
    self.assertEquals(len(res_list), 1)

    directory.stop()
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        """Initialize the container: merge CFG overrides, bootstrap pyon and create
        the manager objects. No external resources are started here -- see start()."""
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing CFG instance not work because references are already public. Update directly
        dict_merge(CFG, kwargs, inplace=True)

        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")

    def start(self):
        """Start the container: write the pidfile, install signal handling, then
        bring up each subsystem in order, recording every acquired capability in
        self._capabilities.

        @raises ContainerError if already started or another Container owns this
                UNIX process (pidfile exists).
        """
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
        self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                # restore the default handler and re-raise SIGTERM at ourselves
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.node, self.ioloop = self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        # Announce the container start on the event bus.
        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container started, OK.")

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary
        status changing method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """ Returns the internal status. """
        return self._status

    def _cleanup_pid(self):
        """Best-effort removal of this container's pidfile; safe to call twice
        (self.pidfile is cleared after the first successful pass)."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
def test_event_persist(self):
    """Verify EventRepository persistence: put_event with a generated id,
    put_event with a caller-assigned id, and batch put_events; then check
    find_events by origin and by event type."""
    # Two canned DeviceStatusEvent documents in couch-style dict form
    # (note the distinct '_id' and 'ts_created' values).
    events = [{'_id': '778dcc0811bd4b518ffd1ef873f3f457',
               'base_types': ['Event'],
               'description': 'Event to deliver the status of instrument.',
               'origin': 'instrument_1',
               'origin_type': 'PlatformDevice',
               'status': 1,
               'sub_type': 'input_voltage',
               'time_stamps': [2.0, 2.0],
               'ts_created': '1364121284585',
               'type_': 'DeviceStatusEvent',
               'valid_values': [-100, 100],
               'values': [110.0, 111.0]},
              {'_id': 'b40731684e41418082e1727f3cf61026',
               'base_types': ['Event'],
               'description': 'Event to deliver the status of instrument.',
               'origin': 'instrument_1',
               'origin_type': 'PlatformDevice',
               'status': 1,
               'sub_type': 'input_voltage',
               'time_stamps': [2.0, 2.0],
               'ts_created': '1364121284609',
               'type_': 'DeviceStatusEvent',
               'valid_values': [-100, 100],
               'values': [110.0, 111.0]}]

    # start from a clean "events" datastore
    dsm = DatastoreManager()
    ds = dsm.get_datastore("events", "EVENTS")
    ds.delete_datastore()
    ds.create_datastore()

    event_repo = EventRepository(dsm)

    # Case 1: put with a repository-generated id ('_id' stripped first)
    event1_dict = events[0].copy()
    event1_dict.pop("_id")
    event1_type = event1_dict.pop("type_")
    event1 = IonObject(event1_type, **event1_dict)
    event_repo.put_event(event1)

    events_r = event_repo.find_events(origin=event1_dict["origin"])
    self.assertEquals(len(events_r), 1)
    # find_events rows are tuples; element 2 is the event object itself
    event1_read = events_r[0][2]
    self.assertEquals(event1_read.time_stamps, event1_dict["time_stamps"])

    # Case 2: put with a caller-assigned id
    event2_dict = events[1].copy()
    event2_id = event2_dict.pop("_id")
    event2_type = event2_dict.pop("type_")
    event2_obj = IonObject(event2_type, **event2_dict)
    event2_obj._id = event2_id
    event_repo.put_event(event2_obj)

    # Case 3: batch put of both events, each with its preset id
    event1_dict = events[0].copy()
    event1_id = event1_dict.pop("_id")
    event1_type = event1_dict.pop("type_")
    event1_obj = IonObject(event1_type, **event1_dict)
    event1_obj._id = event1_id

    event2_dict = events[1].copy()
    event2_id = event2_dict.pop("_id")
    event2_type = event2_dict.pop("type_")
    event2_obj = IonObject(event2_type, **event2_dict)
    event2_obj._id = event2_id

    event_repo.put_events([event1_obj, event2_obj])

    # 3 distinct ids total: one generated + the two preset ones
    # (event2's id was stored twice, so presumably put_events upserts —
    # the count below relies on that)
    events_r = event_repo.find_events(event_type='DeviceStatusEvent')
    self.assertEquals(len(events_r), 3)
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and
    services that do the bulk of the work in the ION system. It also manages
    connections to the Exchange and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node = None
    id = None          # container id, set per instance in __init__
    name = None        # cc_agent name, derived from the container id
    pidfile = None     # path of the per-UNIX-process pid file
    instance = None    # the one Container instance in this process

    def __init__(self, *args, **kwargs):
        """Initialize container identity, managers and repositories.
        Only the DatastoreManager capability is started here (see TODO)."""
        BaseContainerAgent.__init__(self, *args, **kwargs)
        self._is_started = False

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        self._capabilities = []

        # register this instance as the process-wide singleton
        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        """Start all container capabilities in order (pid file, signal
        handlers, datastores, repositories, local router, exchange/proc/app
        managers, CC-Agent RPC endpoint) and publish a START
        ContainerLifecycleEvent.

        Raises ContainerError if already started or if another Container
        already runs in this UNIX process (pid file present)."""
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name()}
            f.write(msgpack.dumps(pid_contents))
        atexit.register(self._cleanup_pid)
        self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                # restore the previous handler and re-deliver SIGTERM to ourselves
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self, cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)

    def _handle_sigusr2(self):  #, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()
        allgls = []
        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func
            status[0].insert(0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))

            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            # keep only the first two lines of each traceback frame
            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)
            allgls.append(glstr)

        # print it out! (to stderr and to a per-pid dump file)
        print >>sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.
        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary
        status changing method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                # kill the parent-watching greenlet, if one was set up elsewhere
                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                # top-level boundary: log whatever escaped, then fall through to shutdown
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """ Returns the internal status. """
        return self._status

    def _cleanup_pid(self):
        """Remove this container's pid file if present; safe to call twice
        (self.pidfile is cleared after the first attempt)."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
def test_directory(self):
    """End-to-end Directory test: register/lookup/unregister CRUD, tree
    searches (find_by_value / find_child_entries / find_by_key), and
    cleanup of outdated duplicate entries on lookup."""
    # start from a clean "resources" datastore
    dsm = DatastoreManager()
    ds = dsm.get_datastore("resources", "DIRECTORY")
    ds.delete_datastore()
    ds.create_datastore()

    directory = Directory(datastore_manager=dsm)
    directory.start()
    #self.addCleanup(directory.dir_store.delete_datastore)

    objs = directory.dir_store.list_objects()
    # the design document only exists for a CouchDB-backed datastore
    if CFG.get_safe("container.datastore.default_server", "couchdb").startswith("couch"):
        self.assert_("_design/directory" in objs)

    root = directory.lookup("/DIR")
    self.assert_(root is not None)

    entry = directory.lookup("/temp")
    self.assert_(entry is None)

    entry_old = directory.register("/", "temp")
    self.assertEquals(entry_old, None)

    # Create a node
    entry = directory.lookup("/temp")
    self.assertEquals(entry, {})

    # The create case
    entry_old = directory.register("/temp", "entry1", foo="awesome")
    self.assertEquals(entry_old, None)
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, {"foo": "awesome"})

    # The update case
    entry_old = directory.register("/temp", "entry1", foo="ingenious")
    self.assertEquals(entry_old, {"foo": "awesome"})

    # The delete case
    entry_old = directory.unregister("/temp", "entry1")
    self.assertEquals(entry_old, {"foo": "ingenious"})
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, None)

    # build a small tree to search over; note "rid7" is registered twice
    # and key "X" appears in two different parents
    directory.register("/BranchA", "X", resource_id="rid1")
    directory.register("/BranchA", "Y", resource_id="rid2")
    directory.register("/BranchA", "Z", resource_id="rid3")
    directory.register("/BranchA/X", "a", resource_id="rid4")
    directory.register("/BranchA/X", "b", resource_id="rid5")
    directory.register("/BranchB", "k", resource_id="rid6")
    directory.register("/BranchB", "l", resource_id="rid7")
    directory.register("/BranchB/k", "m", resource_id="rid7")
    directory.register("/BranchB/k", "X")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
    self.assertEquals(len(res_list), 1)
    self.assertEquals(res_list[0].org, "ION")
    self.assertEquals(res_list[0].parent, "/BranchA")
    self.assertEquals(res_list[0].key, "Z")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    # "/Branch" still matches both entries — presumably a prefix-style
    # parent match; confirm against Directory.find_by_value semantics
    res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 1)

    res_list = directory.find_child_entries("/BranchB/k/m")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB", direct_only=False)
    self.assertEquals(len(res_list), 4)

    res_list = directory.find_by_key("X")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_key("X", parent="/BranchB")
    self.assertEquals(len(res_list), 1)

    # Test _cleanup_outdated_entries
    directory.register("/some", "dupentry", foo="ingenious")
    de = directory.lookup("/some/dupentry", return_entry=True)
    # clone the entry minus identity fields to force a duplicate document
    de1_attrs = de.__dict__.copy()
    del de1_attrs["_id"]
    del de1_attrs["_rev"]
    del de1_attrs["type_"]
    de1 = DirEntry(**de1_attrs)
    de_id1, _ = directory.dir_store.create(de1)
    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(2, len(res_list))
    # lookup drops the duplicate count back to 1 (the outdated copy is removed)
    de = directory.lookup("/some/dupentry", return_entry=True)
    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(1, len(res_list))

    # same again, but the duplicate is newer (ts_updated + 10) and carries a
    # marker attribute; after cleanup the newer copy must be the survivor
    de1_attrs = de.__dict__.copy()
    del de1_attrs["_id"]
    del de1_attrs["_rev"]
    del de1_attrs["type_"]
    de1_attrs["ts_updated"] = str(int(de1_attrs["ts_updated"]) + 10)
    de1_attrs["attributes"]["unique"] = "NEW"
    de1 = DirEntry(**de1_attrs)
    de_id1, _ = directory.dir_store.create(de1)
    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(2, len(res_list))
    de = directory.lookup("/some/dupentry", return_entry=True)
    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(1, len(res_list))
    self.assertEquals("NEW", res_list[0].attributes["unique"])

    directory.stop()
def test_directory(self):
    """Directory test with event publishing patched off: registry CRUD,
    tree searches, lookup_mult, and duplicate-entry prevention.

    Fix: the duplicate-create check read ``ex.message`` off the
    assertRaises context manager; the raised exception object lives on
    ``ex.exception``, so the original assertion would raise
    AttributeError instead of checking the message.
    """
    # start from a clean "resources" datastore
    dsm = DatastoreManager()
    ds = dsm.get_datastore("resources", "DIRECTORY")
    ds.delete_datastore()
    ds.create_datastore()

    # silence directory event publishing for this test
    self.patch_cfg('pyon.ion.directory.CFG', {'service': {'directory': {'publish_events': False}}})

    directory = Directory(datastore_manager=dsm)
    directory.start()
    #self.addCleanup(directory.dir_store.delete_datastore)

    objs = directory.dir_store.list_objects()   # not asserted; contents are backend-dependent

    root = directory.lookup("/DIR")
    self.assert_(root is not None)

    entry = directory.lookup("/temp")
    self.assert_(entry is None)

    entry_old = directory.register("/", "temp")
    self.assertEquals(entry_old, None)

    # Create a node
    entry = directory.lookup("/temp")
    self.assertEquals(entry, {})

    # The create case
    entry_old = directory.register("/temp", "entry1", foo="awesome")
    self.assertEquals(entry_old, None)
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, {"foo": "awesome"})

    # The update case
    entry_old = directory.register("/temp", "entry1", foo="ingenious")
    self.assertEquals(entry_old, {"foo": "awesome"})

    # The delete case
    entry_old = directory.unregister("/temp", "entry1")
    self.assertEquals(entry_old, {"foo": "ingenious"})
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, None)

    # build a small tree to search over; "rid7" is registered twice and
    # key "X" appears under two different parents
    directory.register("/BranchA", "X", resource_id="rid1")
    directory.register("/BranchA", "Y", resource_id="rid2")
    directory.register("/BranchA", "Z", resource_id="rid3")
    directory.register("/BranchA/X", "a", resource_id="rid4")
    directory.register("/BranchA/X", "b", resource_id="rid5")
    directory.register("/BranchB", "k", resource_id="rid6")
    directory.register("/BranchB", "l", resource_id="rid7")
    directory.register("/BranchB/k", "m", resource_id="rid7")
    directory.register("/BranchB/k", "X")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
    self.assertEquals(len(res_list), 1)
    self.assertEquals(res_list[0].org, "ION")
    self.assertEquals(res_list[0].parent, "/BranchA")
    self.assertEquals(res_list[0].key, "Z")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 1)

    res_list = directory.find_child_entries("/BranchB/k/m")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB", direct_only=False)
    self.assertEquals(len(res_list), 4)

    res_list = directory.find_by_key("X")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_key("X", parent="/BranchB")
    self.assertEquals(len(res_list), 1)

    # multi-key lookup preserves request order
    entry_list = directory.lookup_mult("/BranchA", ["X", "Z"])
    self.assertEquals(len(entry_list), 2)
    self.assertEquals(entry_list[0]["resource_id"], "rid1")
    self.assertEquals(entry_list[1]["resource_id"], "rid3")

    entry_list = directory.lookup_mult("/BranchA", ["Y", "FOO"])
    self.assertEquals(len(entry_list), 2)
    self.assertEquals(entry_list[0]["resource_id"], "rid2")
    self.assertEquals(entry_list[1], None)   # missing keys come back as None

    # Test prevent duplicate entries
    directory.register("/some", "dupentry", foo="ingenious")
    de = directory.lookup("/some/dupentry", return_entry=True)
    # clone the entry minus identity fields to attempt a duplicate create
    de1_attrs = de.__dict__.copy()
    del de1_attrs["_id"]
    del de1_attrs["_rev"]
    del de1_attrs["type_"]
    de1 = DirEntry(**de1_attrs)
    with self.assertRaises(BadRequest) as ex:
        de_id1, _ = directory.dir_store.create(de1)
    # BUGFIX: the raised exception is ex.exception, not an attribute of
    # the context manager itself
    self.assertTrue(ex.exception.message.startswith("DirEntry already exists"))
    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(1, len(res_list))
def test_directory_lock(self): dsm = DatastoreManager() ds = dsm.get_datastore("resources", "DIRECTORY") ds.delete_datastore() ds.create_datastore() self.patch_cfg('pyon.ion.directory.CFG', {'service': { 'directory': { 'publish_events': False } }}) directory = Directory(datastore_manager=dsm) directory.start() lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc1")) self.assertEquals(lock1, True) lock2 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc2")) self.assertEquals(lock2, False) with self.assertRaises(BadRequest): directory.acquire_lock("LOCK/SOME") with self.assertRaises(BadRequest): directory.release_lock("LOCK/SOME") with self.assertRaises(NotFound): directory.release_lock("LOCK2") directory.release_lock("LOCK1") lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc3")) self.assertEquals(lock1, True) # TEST: With lock holders lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1") self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1") self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2") self.assertEquals(lock5, False) directory.release_lock("LOCK5") # TEST: Timeout lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1", timeout=0.1) self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2") self.assertEquals(lock5, False) res = directory.is_locked("LOCK5") self.assertEquals(res, True) gevent.sleep(0.15) res = directory.is_locked("LOCK5") self.assertEquals(res, False) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=0.1) self.assertEquals(lock5, True) gevent.sleep(0.15) # TEST: Holder self renew lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=0.1) self.assertEquals(lock5, True) directory.stop()
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and
    services that do the bulk of the work in the ION system. It also manages
    connections to the Exchange and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node = None
    id = None          # container id, set per instance in __init__
    name = None        # cc_agent name, derived from the container id
    pidfile = None     # path of the per-UNIX-process pid file
    instance = None    # the one Container instance in this process

    def __init__(self, *args, **kwargs):
        """Initialize container identity, managers and repositories.
        Only the DatastoreManager capability is started here (see TODO)."""
        BaseContainerAgent.__init__(self, *args, **kwargs)
        self._is_started = False

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        self._capabilities = []

        # register this instance as the process-wide singleton
        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        """Start all container capabilities in order (pid file, signal
        handlers, datastores, repositories, local router, exchange/proc/app
        managers, CC-Agent RPC endpoint) and publish a START
        ContainerLifecycleEvent.

        Raises ContainerError if already started or if another Container
        already runs in this UNIX process (pid file present)."""
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name()}
            f.write(msgpack.dumps(pid_contents))
        atexit.register(self._cleanup_pid)
        self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                # restore the previous handler and re-deliver SIGTERM to ourselves
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self, cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)

    def _handle_sigusr2(self):  #, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()
        allgls = []
        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func
            status[0].insert(0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))

            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            # keep only the first two lines of each traceback frame
            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)
            allgls.append(glstr)

        # print it out! (to stderr and to a per-pid dump file)
        print >>sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.
        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary
        status changing method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                # kill the parent-watching greenlet, if one was set up elsewhere
                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                # top-level boundary: log whatever escaped, then fall through to shutdown
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """ Returns the internal status. """
        return self._status

    def _cleanup_pid(self):
        """Remove this container's pid file if present; safe to call twice
        (self.pidfile is cleared after the first attempt)."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
def test_directory(self):
    """Older Directory test variant: registry CRUD and tree searches
    against a CouchDB-backed "resources" datastore (no start()/stop()
    lifecycle; the directory is closed at the end)."""
    # start from a clean "resources" datastore
    dsm = DatastoreManager()
    ds = dsm.get_datastore("resources")
    ds.delete_datastore()
    ds.create_datastore()

    directory = Directory(datastore_manager=dsm)
    #self.addCleanup(directory.dir_store.delete_datastore)

    # this variant assumes a CouchDB backend: the design doc must exist
    objs = directory.dir_store.list_objects()
    self.assert_("_design/directory" in objs)

    root = directory.lookup("/DIR")
    self.assert_(root is not None)

    entry = directory.lookup("/temp")
    self.assert_(entry is None)

    entry_old = directory.register("/", "temp")
    self.assertEquals(entry_old, None)

    # Create a node
    entry = directory.lookup("/temp")
    self.assertEquals(entry, {})

    # The create case
    entry_old = directory.register("/temp", "entry1", foo="awesome")
    self.assertEquals(entry_old, None)
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, {"foo": "awesome"})

    # The update case
    entry_old = directory.register("/temp", "entry1", foo="ingenious")
    self.assertEquals(entry_old, {"foo": "awesome"})

    # The delete case
    entry_old = directory.unregister("/temp", "entry1")
    self.assertEquals(entry_old, {"foo": "ingenious"})
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, None)

    # build a small tree to search over; "rid7" is registered twice and
    # key "X" appears under two different parents
    directory.register("/BranchA", "X", resource_id="rid1")
    directory.register("/BranchA", "Y", resource_id="rid2")
    directory.register("/BranchA", "Z", resource_id="rid3")
    directory.register("/BranchA/X", "a", resource_id="rid4")
    directory.register("/BranchA/X", "b", resource_id="rid5")
    directory.register("/BranchB", "k", resource_id="rid6")
    directory.register("/BranchB", "l", resource_id="rid7")
    directory.register("/BranchB/k", "m", resource_id="rid7")
    directory.register("/BranchB/k", "X")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
    self.assertEquals(len(res_list), 1)
    self.assertEquals(res_list[0].org, "ION")
    self.assertEquals(res_list[0].parent, "/BranchA")
    self.assertEquals(res_list[0].key, "Z")

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 1)

    res_list = directory.find_child_entries("/BranchB/k/m")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB", direct_only=False)
    self.assertEquals(len(res_list), 4)

    res_list = directory.find_by_key("X")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_key("X", parent="/BranchB")
    self.assertEquals(len(res_list), 1)

    directory.close()
def test_directory_lock(self): dsm = DatastoreManager() ds = dsm.get_datastore("resources", "DIRECTORY") ds.delete_datastore() ds.create_datastore() self.patch_cfg('pyon.ion.directory.CFG', {'service': {'directory': {'publish_events': False}}}) directory = Directory(datastore_manager=dsm) directory.start() lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc1")) self.assertEquals(lock1, True) lock2 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc2")) self.assertEquals(lock2, False) with self.assertRaises(BadRequest): directory.acquire_lock("LOCK/SOME") with self.assertRaises(BadRequest): directory.release_lock("LOCK/SOME") with self.assertRaises(NotFound): directory.release_lock("LOCK2") directory.release_lock("LOCK1") lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc3")) self.assertEquals(lock1, True) # TEST: With lock holders lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1") self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1") self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2") self.assertEquals(lock5, False) directory.release_lock("LOCK5") # TEST: Timeout lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1", timeout=100) self.assertEquals(lock5, True) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2") self.assertEquals(lock5, False) res = directory.is_locked("LOCK5") self.assertEquals(res, True) gevent.sleep(0.15) res = directory.is_locked("LOCK5") self.assertEquals(res, False) lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=100) self.assertEquals(lock5, True) gevent.sleep(0.15) # TEST: Holder self renew lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=100) self.assertEquals(lock5, True) directory.stop()
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables (populated on the instance in __init__/start)
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        """Initialize container state and instantiate (but do not start) all managers.

        kwargs are command-line/spawn overrides; they are merged into CFG and
        kept in self.spawn_args so they can trump app/rel file data later.
        """
        BaseContainerAgent.__init__(self, *args, **kwargs)
        self._is_started = False
        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id
        Container.instance = self

        # TODO: Bug: Replacing CFG instance not work because references are already public. Update directly
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Coordinates the container start
        self._is_started = False
        # Ordered record of started capabilities; consulted during start/shutdown
        self._capabilities = []

        log.debug("Container initialized, OK.")

    def start(self):
        """Start all container capabilities in dependency order.

        Raises ContainerError if already started or if a pid file from another
        Container in this UNIX process exists.
        """
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import get_sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
        self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # NOTE(review): capability recorded without any visible connection setup
        # here - confirm the exchange connection is established elsewhere.
        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self._capabilities.append("DIRECTORY")

        # Create other repositories to make sure they are there and clean if needed
        self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        self.event_repository = EventRepository()
        self._capabilities.append("EVENT_REPOSITORY")

        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this in ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node()  # TODO: shortcut hack
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self._is_started = True

        log.info("Container started, OK.")

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        try:
            # This just waits in this Greenlet for all child processes to complete,
            # which is triggered somewhere else.
            self.proc_manager.proc_sup.join_children()
        except (KeyboardInterrupt, SystemExit) as ex:
            log.info('Received a kill signal, shutting down the container.')
        except:
            log.exception('Unhandled error! Forcing container shutdown')

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def _cleanup_pid(self):
        """Remove this container's pid file; safe to call more than once."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                # best-effort: failure to remove the pidfile is only logged
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
def test_directory(self):
    """End-to-end test of Directory: register/lookup/unregister, subtree
    searches (find_by_value, find_child_entries, find_by_key), multi-lookup,
    and duplicate-entry prevention in the backing store."""
    dsm = DatastoreManager()
    ds = dsm.get_datastore("resources", "DIRECTORY")
    ds.delete_datastore()
    ds.create_datastore()

    self.patch_cfg('pyon.ion.directory.CFG', {'service': {'directory': {'publish_events': False}}})
    directory = Directory(datastore_manager=dsm)
    directory.start()

    #self.addCleanup(directory.dir_store.delete_datastore)
    objs = directory.dir_store.list_objects()

    # Root node exists after start; unknown paths look up as None
    root = directory.lookup("/DIR")
    self.assert_(root is not None)

    entry = directory.lookup("/temp")
    self.assert_(entry is None)

    # Registering a new key returns None (no prior entry)
    entry_old = directory.register("/", "temp")
    self.assertEquals(entry_old, None)

    # Create a node
    entry = directory.lookup("/temp")
    self.assertEquals(entry, {})

    # The create case
    entry_old = directory.register("/temp", "entry1", foo="awesome")
    self.assertEquals(entry_old, None)
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, {"foo": "awesome"})

    # The update case - register returns the previous attributes
    entry_old = directory.register("/temp", "entry1", foo="ingenious")
    self.assertEquals(entry_old, {"foo": "awesome"})

    # The delete case - unregister returns the removed attributes
    entry_old = directory.unregister("/temp", "entry1")
    self.assertEquals(entry_old, {"foo": "ingenious"})
    entry_new = directory.lookup("/temp/entry1")
    self.assertEquals(entry_new, None)

    # Build a small tree; note "rid7" appears twice and key "X" appears twice
    directory.register("/BranchA", "X", resource_id="rid1")
    directory.register("/BranchA", "Y", resource_id="rid2")
    directory.register("/BranchA", "Z", resource_id="rid3")
    directory.register("/BranchA/X", "a", resource_id="rid4")
    directory.register("/BranchA/X", "b", resource_id="rid5")
    directory.register("/BranchB", "k", resource_id="rid6")
    directory.register("/BranchB", "l", resource_id="rid7")
    directory.register("/BranchB/k", "m", resource_id="rid7")
    directory.register("/BranchB/k", "X")

    # Attribute search from root finds the single rid3 entry
    res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
    self.assertEquals(len(res_list), 1)
    self.assertEquals(res_list[0].org, "ION")
    self.assertEquals(res_list[0].parent, "/BranchA")
    self.assertEquals(res_list[0].key, "Z")

    # Non-existing value yields no hits
    res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
    self.assertEquals(len(res_list), 0)

    # rid7 was registered twice -> two hits from root and from /BranchB;
    # "/Branch" acts as a prefix and also matches both
    res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 2)

    # Narrowing to /BranchB/k leaves only the "m" entry
    res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
    self.assertEquals(len(res_list), 1)

    # Child listing: leaf has none; /BranchB has 2 direct, 4 in the full subtree
    res_list = directory.find_child_entries("/BranchB/k/m")
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
    self.assertEquals(len(res_list), 0)

    res_list = directory.find_child_entries("/BranchB", direct_only=False)
    self.assertEquals(len(res_list), 4)

    # Key search: "X" exists under /BranchA and /BranchB/k
    res_list = directory.find_by_key("X")
    self.assertEquals(len(res_list), 2)

    res_list = directory.find_by_key("X", parent="/BranchB")
    self.assertEquals(len(res_list), 1)

    # Multi-lookup preserves request order; missing keys come back as None
    entry_list = directory.lookup_mult("/BranchA", ["X", "Z"])
    self.assertEquals(len(entry_list), 2)
    self.assertEquals(entry_list[0]["resource_id"], "rid1")
    self.assertEquals(entry_list[1]["resource_id"], "rid3")

    entry_list = directory.lookup_mult("/BranchA", ["Y", "FOO"])
    self.assertEquals(len(entry_list), 2)
    self.assertEquals(entry_list[0]["resource_id"], "rid2")
    self.assertEquals(entry_list[1], None)

    # Test prevent duplicate entries
    directory.register("/some", "dupentry", foo="ingenious")
    de = directory.lookup("/some/dupentry", return_entry=True)
    # Clone the entry minus identity fields, then try to create the clone
    de1_attrs = de.__dict__.copy()
    del de1_attrs["_id"]
    del de1_attrs["_rev"]
    del de1_attrs["type_"]
    de1 = DirEntry(**de1_attrs)
    with self.assertRaises(BadRequest) as ex:
        de_id1, _ = directory.dir_store.create(de1)
    # NOTE(review): reads the message off the assertRaises context - confirm
    # this test base exposes `ex.message` (stock unittest uses ex.exception)
    self.assertTrue(ex.message.startswith("DirEntry already exists"))

    res_list = directory.find_by_key("dupentry", parent="/some")
    self.assertEquals(1, len(res_list))
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections
    to the Exchange and the various forms of datastores in the systems.
    """

    # Singleton static variables (instance values are set in __init__/start)
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        """Initialize container state and instantiate all managers.

        Only the DatastoreManager is started here; everything else is
        started in start(). kwargs are retained as spawn_args so
        command-line overrides can trump app/rel file data.
        """
        BaseContainerAgent.__init__(self, *args, **kwargs)
        self._is_started = False
        # Ordered record of started capabilities; consulted by start/node/shutdown
        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Has the additional side effect of either bootstrapping the configuration
        # into the directory or read the configuration based in the value of the
        # auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")

    def start(self):
        """Start all container capabilities in dependency order and publish
        a ContainerLifecycleEvent START event.

        Raises ContainerError if already started or if a pid file from
        another Container in this UNIX process exists.
        """
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            # NOTE(review): uses module-level `bootstrap` here (the import in
            # __init__ is function-local) - confirm the file imports it at top level
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
        self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # second start() call is a no-op for an already-started DatastoreManager
        # (it was started in __init__); capability is recorded again here
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Self-register with Directory
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container started, OK.")

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary
        status changing method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            # always restore the prior status, even on exception
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                # optionally kill a watching parent greenlet/process if configured
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """ Returns the internal status. """
        return self._status

    def _cleanup_pid(self):
        """Remove this container's pid file; safe to call more than once."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                # best-effort: failure to remove the pidfile is only logged
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
def test_event_persist(self):
    """Round-trip events through the EventRepository, covering puts with a
    repository-assigned ID, a caller-preset ID, and a batch put of both."""
    events = [{'_id': '778dcc0811bd4b518ffd1ef873f3f457',
               'base_types': ['Event', 'ResourceEvent'],
               'description': 'Event to deliver the status of instrument.',
               'origin': 'instrument_1',
               'origin_type': 'PlatformDevice',
               'sub_type': 'input_voltage',
               'ts_created': '1364121284585',
               'type_': 'ResourceLifecycleEvent'},
              {'_id': 'b40731684e41418082e1727f3cf61026',
               'base_types': ['Event', 'ResourceEvent'],
               'description': 'Event to deliver the status of instrument.',
               'origin': 'instrument_1',
               'origin_type': 'PlatformDevice',
               'sub_type': 'input_voltage',
               'ts_created': '1364121284609',
               'type_': 'ResourceModifiedEvent'}]

    def build_event(event_doc):
        # Convert a raw event document into (IonObject, original doc id);
        # "_id" and "type_" are stripped before object construction.
        attrs = event_doc.copy()
        doc_id = attrs.pop("_id")
        obj_type = attrs.pop("type_")
        return IonObject(obj_type, **attrs), doc_id

    store_mgr = DatastoreManager()
    event_ds = store_mgr.get_datastore(DataStore.DS_EVENTS, DataStore.DS_PROFILE.EVENTS)
    event_ds.delete_datastore()
    event_ds.create_datastore()

    repo = EventRepository(store_mgr)

    # Store one event without ID
    first_obj, _ = build_event(events[0])
    repo.put_event(first_obj)

    found = repo.find_events(origin=events[0]["origin"])
    self.assertEquals(len(found), 1)
    first_read = found[0][2]

    # Store one event with ID
    second_obj, second_id = build_event(events[1])
    second_obj._id = second_id
    repo.put_event(second_obj)

    # Store multiple new events with ID set and unset, non-existing
    batch_first, _ = build_event(events[0])
    batch_first._id = create_unique_event_id()

    batch_second, _ = build_event(events[1])

    repo.put_events([batch_first, batch_second])
    found = repo.find_events(event_type='ResourceModifiedEvent')
    self.assertEquals(len(found), 2)