def publish_error(self, request_id, error, error_code):
    async_res = AsyncResultMsg(result=error, request_id=request_id,
                               ts=get_ion_ts(), status=error_code)
    self.pub.publish(async_res)
    self.pub.close()
def build_packet_from_samples(cls, samples, **kwargs):
    num_samples = len(samples["data"])
    dtype_parts = []
    for coldef in samples["cols"]:
        if coldef == "time":
            dtype_parts.append((coldef, "i8"))
        elif "coltypes" in samples and coldef in samples["coltypes"]:
            dtype_parts.append((coldef, samples["coltypes"][coldef]))
        else:
            dtype_parts.append((coldef, "f8"))
    dt = np.dtype(dtype_parts)

    data_array = np.zeros(num_samples, dtype=dt)
    for row_num, data_row in enumerate(samples["data"]):
        row_tuple = tuple(NTP4Time.np_from_string(dv) if isinstance(dv, basestring) else dv
                          for dv in data_row)
        data_array[row_num] = np.array(row_tuple, dtype=dt)

    data = samples.copy()
    data["data"] = data_array

    new_packet = DataPacket(ts_created=get_ion_ts(), data=data)
    for attr in new_packet.__dict__.keys():
        if attr in ('data', 'ts_created'):
            continue
        if attr in kwargs:
            setattr(new_packet, attr, kwargs[attr])
    return new_packet
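# Illustrative only: a minimal sketch of the `samples` structure this builder consumes,
# inferred from the keys it reads ("cols", optional "coltypes", "data"). The column names,
# values, and the assumption that this is exposed as a classmethod on DataPacket are
# hypothetical, not taken from the source.
example_samples = {
    "cols": ["time", "temperature", "quality_flag"],
    "coltypes": {"quality_flag": "i4"},       # overrides the default "f8" for that column
    "data": [
        [1546300800000, 10.31, 1],            # placeholder time value; strings would go
        [1546300860000, 10.28, 1],            # through NTP4Time.np_from_string() instead
    ],
}
# packet = DataPacket.build_packet_from_samples(example_samples)   # hypothetical call site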
def trigger_container_snapshot(self, snapshot_id='', include_snapshots=None, exclude_snapshots=None,
                               take_at_time='', clear_all=False, persist_snapshot=True, snapshot_kwargs=None):
    if not snapshot_id:
        snapshot_id = get_ion_ts()
    if not snapshot_kwargs:
        snapshot_kwargs = {}

    self.perform_action(ALL_CONTAINERS_INSTANCE,
                        IonObject(OT.TriggerContainerSnapshot,
                                  snapshot_id=snapshot_id,
                                  include_snapshots=include_snapshots,
                                  exclude_snapshots=exclude_snapshots,
                                  take_at_time=take_at_time,
                                  clear_all=clear_all,
                                  persist_snapshot=persist_snapshot,
                                  snapshot_kwargs=snapshot_kwargs))

    log.info("Event to trigger container snapshots sent. snapshot_id=%s" % snapshot_id)
def start_agent(self, asset_id='', arguments=None):
    asset_obj = self._validate_resource_id("asset_id", asset_id, RT.Instrument)
    if not asset_obj.agent_info:
        raise BadRequest("Cannot find agent information")
    agent_info = asset_obj.agent_info[0]
    agent_cfg = agent_info.get("config") or {}
    if arguments and type(arguments) is dict:
        dict_merge(agent_cfg, arguments, inplace=True)

    dataset_info = self.get_asset_data(asset_id, data_filter=dict(get_info=True, include_data=False))
    dataset_id = dataset_info["dataset_id"]
    agent_cfg["dataset_id"] = dataset_id
    agent_cfg["dataset_info"] = {}
    log.info("Start agent for %s with dataset %s", asset_id, dataset_id)
    if dataset_info and dataset_info["info"]:
        log.info("Agent RESTART %s. Dataset exists, last sample date: %s",
                 asset_id, dataset_info["info"]["ts_last_str"])
        agent_cfg["dataset_info"] = dataset_info["info"]

    log.info("Using agent_info: %s", agent_info)

    agent_ctl = AgentControl()
    agent_pid = agent_ctl.launch_agent(asset_id, agent_info["agent_type"], agent_cfg)

    asset_obj.agent_state = {}   # We assume only 1 agent per asset
    asset_obj.agent_state[agent_pid] = dict(start_ts=get_ion_ts())
    self.rr.update(asset_obj)

    log.info("Agent started for %s with pid=%s", asset_id, agent_pid)
    return agent_pid
def _create_association(self, subject=None, predicate=None, obj=None, support_bulk=False):
    """
    Create an association between two IonObjects with a given predicate.
    Supports bulk mode.
    """
    if self.bulk and support_bulk:
        if not subject or not predicate or not obj:
            raise BadRequest("Association must have all elements set: %s/%s/%s" % (subject, predicate, obj))
        if isinstance(subject, basestring):
            subject = self._get_resource_obj(subject)
        if "_id" not in subject:
            raise BadRequest("Subject id not available")
        subject_id = subject._id
        st = subject.type_

        if isinstance(obj, basestring):
            obj = self._get_resource_obj(obj)
        if "_id" not in obj:
            raise BadRequest("Object id not available")
        object_id = obj._id
        ot = obj.type_

        assoc_id = create_unique_association_id()
        assoc_obj = IonObject("Association",
                              s=subject_id, st=st,
                              p=predicate,
                              o=object_id, ot=ot,
                              ts=get_ion_ts())
        assoc_obj._id = assoc_id
        self.bulk_associations[assoc_id] = assoc_obj
        return assoc_id, '1-norev'
    else:
        return self.rr.create_association(subject, predicate, obj)
def get_asset_data(self, asset_id='', data_format='', data_filter=None):
    asset_obj = self._validate_resource_id("asset_id", asset_id, RT.Instrument)
    dataset_objs, _ = self.rr.find_objects(asset_id, PRED.hasDataset, RT.Dataset, id_only=False)
    if not dataset_objs:
        raise BadRequest("Could not find dataset")
    dataset_obj = dataset_objs[0]

    from ion.data.persist.hdf5_dataset import DatasetHDF5Persistence
    persistence = DatasetHDF5Persistence(dataset_obj._id, dataset_obj.schema_definition, "hdf5")

    data_filter1 = dict(transpose_time=True, time_format="unix_millis", max_rows=1000)
    data_filter1.update(data_filter or {})

    data_info = dict(dataset_id=dataset_obj._id, ts_generated=get_ion_ts(),
                     data={}, info={}, num_rows=0)

    if data_filter1.get("get_info", None) is True:
        data_info["variables"] = [var_info["name"] for var_info in dataset_obj.schema_definition["variables"]]
        data_info["schema"] = dataset_obj.schema_definition
        res_info = persistence.get_data_info(data_filter1)
        data_info["info"].update(res_info)

    if data_filter1.get("include_data", True):
        raw_data = persistence.get_data(data_filter=data_filter1)
        data_info["data"] = raw_data
        data_info["num_rows"] = len(raw_data.values()[0]) if raw_data else 0

    return data_info
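# Illustrative only: the data_filter keys this accessor reacts to, inferred from the
# defaults it sets and the flags it checks above. The specific values are assumptions.
example_filter = dict(
    get_info=True,             # also return variable names, schema and dataset info
    include_data=True,         # set False to skip reading rows (metadata only)
    max_rows=500,              # overrides the default of 1000
    transpose_time=True,
    time_format="unix_millis",
)
# e.g. self.get_asset_data(asset_id, data_filter=example_filter)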
def find_events(self, origin='', type='', min_datetime='', max_datetime='', limit=-1,
                descending=False, skip=0, computed=False):
    """
    Returns a list of events that match the specified search criteria.
    Can return a list of EventComputedAttributes if requested, with the event objects contained.
    Pagination arguments are supported.
    @param origin        str
    @param min_datetime  str milliseconds
    @param max_datetime  str milliseconds
    @param limit         int (limits the number of results; 0 means use the configured system maximum)
    @param descending    boolean (if True, reverse order (of production time) is applied, e.g. most recent first)
    @retval event_list   []
    """
    if limit == 0:
        limit = int(self.CFG.get_safe("service.user_notification.max_events_limit", 1000))
    if max_datetime == "now":
        max_datetime = get_ion_ts()

    event_tuples = self.container.event_repository.find_events(event_type=type, origin=origin,
                                                               start_ts=min_datetime, end_ts=max_datetime,
                                                               limit=limit, descending=descending, skip=skip)

    events = [item[2] for item in event_tuples]
    log.debug("find_events found %s events", len(events))

    if computed:
        computed_events = self._get_computed_events(events, include_events=True)
        events = computed_events.computed_list

    return events
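# Illustrative only: a call showing the special argument values handled above. The origin
# value is a placeholder; parameter semantics come from find_events() itself.
recent = self.find_events(origin="instrument_01",   # hypothetical origin resource id
                          max_datetime="now",        # translated to get_ion_ts() internally
                          limit=0,                   # 0 falls back to the configured maximum
                          descending=True)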
def check_actor_credentials(self, username='', password=''):
    if not username:
        raise BadRequest("Invalid argument username")
    if not password:
        raise BadRequest("Invalid argument password")

    actor_id = self.find_actor_identity_by_username(username)
    actor_obj = self.read_actor_identity(actor_id)
    try:
        if actor_obj.auth_status != AuthStatusEnum.ENABLED:
            raise NotFound("Actor not enabled")

        cred_obj = None
        for cred in actor_obj.credentials:
            if cred.username == username:
                cred_obj = cred
                break

        if bcrypt.hashpw(password, cred_obj.password_salt) != cred_obj.password_hash:
            # Failed login
            if password:   # Only record fail if password is non-empty and wrong
                actor_obj.auth_fail_count += 1
                actor_obj.auth_ts_last_fail = get_ion_ts()
                max_fail_cnt = IdentityUtils.get_auth_fail_lock_count()
                if actor_obj.auth_fail_count > max_fail_cnt:
                    actor_obj.auth_status = AuthStatusEnum.LOCKED
            raise NotFound("Invalid password")

        # Success
        actor_obj.auth_count += 1
        actor_obj.auth_fail_count = 0
        actor_obj.auth_ts_last = get_ion_ts()
        return actor_obj._id
    finally:
        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)
        self._publish_auth_event(actor_obj, username)
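# Illustrative only: how the password_salt / password_hash pair verified above is typically
# produced with the py-bcrypt API (bcrypt.gensalt / bcrypt.hashpw). The helper name and the
# place this would live are assumptions; only the field names come from the code above.
# Note: the newer `bcrypt` package has the same functions but requires bytes arguments.
def _hash_password(password):
    password_salt = bcrypt.gensalt()
    password_hash = bcrypt.hashpw(password, password_salt)
    # check_actor_credentials() later recomputes bcrypt.hashpw(password, password_salt)
    # and compares it to password_hash.
    return password_salt, password_hash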
def _create_bulk_resource(self, res_obj, res_alias=None):
    if not hasattr(res_obj, "_id"):
        res_obj._id = create_unique_resource_id()
    ts = get_ion_ts()
    if hasattr(res_obj, "ts_created") and not res_obj.ts_created:
        res_obj.ts_created = ts
    if hasattr(res_obj, "ts_updated") and not res_obj.ts_updated:
        res_obj.ts_updated = ts

    res_id = res_obj._id
    self.bulk_resources[res_id] = res_obj

    if res_alias:
        self._register_id(res_alias, res_id, res_obj)
    return res_id
def on_start(self):
    self.ION_NOTIFICATION_EMAIL_ADDRESS = CFG.get_safe('server.smtp.sender')

    # Create an event processor
    self.event_processor = EmailEventProcessor()

    # Dictionaries that maintain information about users and their subscribed notifications
    self.user_info = {}
    # The reverse_user_info is calculated from the user_info dictionary
    self.reverse_user_info = {}

    self.event_publisher = EventPublisher(process=self)
    self.start_time = get_ion_ts()

    #------------------------------------------------------------------------------------
    # Create an event subscriber for Reload User Info events
    #------------------------------------------------------------------------------------
    def reload_user_info(event_msg, headers):
        """
        Callback method for the subscriber to ReloadUserInfoEvent
        """
        notification_id = event_msg.notification_id
        log.debug("(UNS instance) received a ReloadNotificationEvent. The relevant notification_id is %s" % notification_id)

        try:
            self.user_info = self.load_user_info()
        except NotFound:
            log.warning("ElasticSearch has not yet loaded the user_index.")

        self.reverse_user_info = calculate_reverse_user_info(self.user_info)

        log.debug("(UNS instance) After a reload, the user_info: %s" % self.user_info)
        log.debug("(UNS instance) The recalculated reverse_user_info: %s" % self.reverse_user_info)

    # The subscriber for the ReloadUserInfoEvent
    self.reload_user_info_subscriber = EventSubscriber(
        event_type=OT.ReloadUserInfoEvent,
        origin='UserNotificationService',
        callback=reload_user_info
    )
    self.add_endpoint(self.reload_user_info_subscriber)
def get_recent_events(self, resource_id='', limit=0, skip=0):
    """
    Returns a list of EventComputedAttributes for events that match the resource id as origin,
    in descending order, most recent first. The total number of events is limited by default
    based on system configuration. Pagination arguments are supported.
    @param resource_id  str
    @param limit        int (if 0 is given, the configured system maximum is used)
    @retval             ComputedListValue with value list of 4-tuple with Event objects
    """
    if limit == 0:
        limit = int(self.CFG.get_safe("service.user_notification.max_events_limit", 1000))
    now = get_ion_ts()
    events = self.find_events(origin=resource_id, max_datetime=now,
                              descending=True, limit=limit, skip=skip)

    computed_events = self._get_computed_events(events)
    return computed_events
def delete_notification(self, notification_id=''):
    """
    For now, permanently deletes NotificationRequest object with the specified id.
    Throws exception if id does not match any persisted NotificationRequest.

    @param notification_id  str
    @throws NotFound        object with specified id does not exist
    """
    #-------------------------------------------------------------------------------------------------------------------
    # Stop the event subscriber for the notification
    #-------------------------------------------------------------------------------------------------------------------
    notification_request = self.clients.resource_registry.read(notification_id)

    #-------------------------------------------------------------------------------------------------------------------
    # Update the resource registry
    #-------------------------------------------------------------------------------------------------------------------
    notification_request.temporal_bounds.end_datetime = get_ion_ts()
    self.clients.resource_registry.update(notification_request)

    #-------------------------------------------------------------------------------------------------------------------
    # Find users who are interested in the notification and update the notification
    # in the list maintained by the UserInfo object
    #-------------------------------------------------------------------------------------------------------------------
    # user_ids, _ = self.clients.resource_registry.find_subjects(RT.UserInfo, PRED.hasNotification, notification_id, True)
    #
    # for user_id in user_ids:
    #     self.update_user_info_object(user_id, notification_request)

    #-------------------------------------------------------------------------------------------------------------------
    # Generate an event that can be picked up by a notification worker so that it can update its user_info dictionary
    #-------------------------------------------------------------------------------------------------------------------
    log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)

    self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent,
                                       origin="UserNotificationService",
                                       description="A notification has been deleted.",
                                       notification_id=notification_id)
def create_notification(self, notification=None, user_id=''):
    """
    Persists the provided NotificationRequest object for the specified Origin id.
    Associates the Notification resource with the user_id string.
    The returned id is the internal id by which NotificationRequest will be identified in the data store.

    @param notification     NotificationRequest
    @param user_id          str
    @retval notification_id str
    @throws BadRequest      if object passed has _id or _rev attribute
    """
    if not user_id:
        raise BadRequest("User id not provided.")

    log.debug("Create notification called for user_id: %s, and notification: %s", user_id, notification)

    #---------------------------------------------------------------------------------------------------
    # Persist Notification object as a resource if it has not already been persisted
    #---------------------------------------------------------------------------------------------------
    notification_id = None
    # If the notification has already been registered, simply use the old id
    existing_user_notifications = self.get_user_notifications(user_info_id=user_id)
    if existing_user_notifications:
        notification_id = self._notification_in_notifications(notification, existing_user_notifications)

    # Since the notification has not been registered yet, register it and get the id
    temporal_bounds = TemporalBounds()
    temporal_bounds.start_datetime = get_ion_ts()
    temporal_bounds.end_datetime = ''

    if not notification_id:
        notification.temporal_bounds = temporal_bounds
        notification_id, rev = self.clients.resource_registry.create(notification)
    else:
        log.debug("Notification object has already been created in resource registry before. No new id to be generated. notification_id: %s", notification_id)
        # Read the old notification already in the resource registry
        notification = self.clients.resource_registry.read(notification_id)
        # Update the temporal bounds of the old notification resource
        notification.temporal_bounds = temporal_bounds
        # Update the notification in the resource registry
        self.clients.resource_registry.update(notification)
        log.debug("The temporal bounds for this resubscribed notification object with id: %s, is: %s",
                  notification._id, notification.temporal_bounds)

    # Link the user and the notification with a hasNotification association
    assocs = self.clients.resource_registry.find_associations(subject=user_id,
                                                              predicate=PRED.hasNotification,
                                                              object=notification_id,
                                                              id_only=True)
    if assocs:
        log.debug("Got an already existing association: %s, between user_id: %s, and notification_id: %s",
                  assocs, user_id, notification_id)
        return notification_id
    else:
        log.debug("Creating association between user_id: %s, and notification_id: %s", user_id, notification_id)
        self.clients.resource_registry.create_association(user_id, PRED.hasNotification, notification_id)

    # Read the registered notification request object because this has an _id and is more useful
    notification = self.clients.resource_registry.read(notification_id)

    #-------------------------------------------------------------------------------------------------------------------
    # Generate an event that can be picked up by a notification worker so that it can update its user_info dictionary
    #-------------------------------------------------------------------------------------------------------------------
    # log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s", notification_id)
    self.event_publisher.publish_event(event_type=OT.ReloadUserInfoEvent,
                                       origin="UserNotificationService",
                                       description="A notification has been created.",
                                       notification_id=notification_id)

    return notification_id
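# Illustrative only: the create/delete round trip as seen from a caller. The client handle
# (uns_client) and the NotificationRequest field names are hypothetical assumptions; only
# create_notification(notification, user_id) and delete_notification(notification_id) come
# from the methods above.
notification = IonObject(OT.NotificationRequest,
                         name="ctd_events",         # hypothetical
                         origin="instrument_01")    # hypothetical: the event origin to watch
notification_id = uns_client.create_notification(notification=notification, user_id=user_id)
# ... later, retire the subscription; this sets temporal_bounds.end_datetime and publishes
# a ReloadUserInfoEvent so notification workers refresh their user_info dictionaries.
uns_client.delete_notification(notification_id)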
def process(event_msg, headers):
    self.end_time = get_ion_ts()

    # Run the process_batch() method
    self.process_batch(start_time=self.start_time, end_time=self.end_time)

    self.start_time = self.end_time
def _get_dir_entry(self, scenario):
    return dict(ts=get_ion_ts(), name=scenario, sys_name=get_sys_name())
def publish_result(self, request_id, result):
    async_res = AsyncResultMsg(result=result, request_id=request_id, ts=get_ion_ts())
    self.pub.publish(async_res)
    self.pub.close()