Code example #1
def process_oms_event():

    json_params = {}

    # oms direct request
    if request.data:
        json_params = json_loads(str(request.data))
        log.debug('ServiceGatewayService:process_oms_event request.data: %s', json_params)

    #validate payload
    if 'platform_id' not in json_params or 'message' not in json_params:
        log.warning('Invalid OMS event format. payload_data: %s', json_params)
        #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    #prepare the event information
    try:
        #create a publisher to relay OMS events into the system as DeviceEvents
        event_publisher = EventPublisher()

        event_publisher.publish_event(
            event_type='OMSDeviceStatusEvent',
            origin_type='OMS Platform',
            origin=json_params.get('platform_id', 'NOT PROVIDED'),
            sub_type='',
            description = json_params.get('message', ''),
            status_details = json_params)
    except Exception as e:
        log.error('Could not publish OMS event: %s. Event data: %s', e.message, json_params)
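
A hedged sketch of the request body this endpoint expects, inferred from the validation above; the values are illustrative, not taken from the source:

    # minimal request body for process_oms_event(); the keys are required by
    # the validation above, the values are made up
    payload = {
        'platform_id': 'Node1A',           # becomes the event origin
        'message': 'Platform heartbeat',   # becomes the event description
    }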
Code example #2
	def write(self, text):
		log.debug("TelnetServer.write(): text = " + str(text))
		if self.fileobj:
			self.fileobj.write(text)
			self.fileobj.flush()
		else:
			log.warning("TelnetServer.write(): no connection yet, can not write text")			
Code example #3
    def find_events(self, origin='', type='', min_datetime=0, max_datetime=0, limit=-1, descending=False):
        """
        This method leverages the CouchDB view and simple filters. It does not use ElasticSearch.

        Returns a list of events that match the specified search criteria. Throws a NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_datetime   int  seconds
        @param max_datetime   int  seconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """
        event_tuples = []

        try:
            event_tuples = self.container.event_repository.find_events(event_type=type, origin=origin, start_ts=min_datetime, end_ts=max_datetime, limit=limit, descending=descending)
        except Exception as exc:
            log.warning("The UNS find_events operation for event origin = %s and type = %s failed. Error message = %s", origin, type, exc.message)

        events = [item[2] for item in event_tuples]
        log.debug("(find_events) UNS found the following relevant events: %s", events)

        return events
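
A hedged usage sketch for the method above; 'uns' stands in for a service instance that defines find_events(), and all argument values are illustrative:

    # fetch the ten most recent matching events (descending production time)
    events = uns.find_events(origin='Node1A', type='OMSDeviceStatusEvent',
                             min_datetime=1342000000, max_datetime=1342100000,
                             limit=10, descending=True)
    for event in events:
        log.debug('matched event: %s', event)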
Code example #4
File: telnet_server.py Project: daf/coi-services
	def __init__(self, inputCallback=None):
		# use globals to pass configuration to telnet handler when it is started by
		# TCP socket server
		global username, password, child_connection
		
		#log.getLogger('').setLevel(log.DEBUG)
		log.debug("TelnetServer.__init__()")
		if not inputCallback:
			log.warning("TelnetServer.__init__(): callback not specified")
			raise ServerError("callback not specified")
		self.parentInputCallback = inputCallback
		
		# TODO: get username and password dynamically
		username = '******'
		password = '******'
	
		# TODO: get ip_address & port number dynamically
		# TODO: ensure that port is not already in use
		self.port = 8000
		#self.ip_address = 'localhost'
		self.ip_address = '67.58.49.202'
		
		# setup a pipe to allow telnet server process to communicate with callbackProxy
		self.parent_connection, child_connection = multiprocessing.Pipe()
		
		# create telnet server object and start the server process
		self.tns = TcpSocketServer((self.ip_address, self.port), TelnetHandler)
		self.serverProcess = multiprocessing.Process(target=self.runServer)
		self.serverProcess.start()
		
		# start the callbackProxy thread to receive client input from telnet server process
		self.callbackProxyThread = threading.Thread(target=self.runCallbackProxy)
		#log.debug("TelnetHandler.setup(): starting callbackProxy thread")
		self.callbackProxyThread.setDaemon(True)
		self.callbackProxyThread.start()
Code example #5
def process_oms_event():

    json_params = {}

    # oms direct request
    if request.data:
        json_params = simplejson.loads(str(request.data))
        log.debug('ServiceGatewayService:process_oms_event request.data: %s',
                  json_params)

    #validate payload
    if 'platform_id' not in json_params or 'message' not in json_params:
        log.warning('Invalid OMS event format. payload_data: %s', json_params)
        #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    #prepare the event information
    try:
        #create a publisher to relay OMS events into the system as DeviceEvents
        event_publisher = EventPublisher()

        event_publisher.publish_event(
            event_type='OMSDeviceStatusEvent',
            origin_type='OMS Platform',
            origin=json_params.get('platform_id', 'NOT PROVIDED'),
            sub_type='',
            description=json_params.get('message', ''),
            status_details=json_params)
    except Exception as e:
        log.error('Could not publish OMS event: %s. Event data: %s',
                  e.message, json_params)
Code example #6
    def _cov2granule(cls, coverage, start_time=None, end_time=None, stride_time=None, stream_def_id=None, parameters=None, tdoa=None):

        if tdoa is None:
            if start_time is not None:
                start_time = cls.convert_time(coverage, start_time)
            if end_time is not None:
                end_time = cls.convert_time(coverage, end_time)
            slice_ = slice(start_time, end_time, stride_time)
        
        if stream_def_id:
            rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        else:
            rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
        if tdoa:
            vdict = coverage.get_value_dictionary(parameters or rdt.fields, domain_slice=tdoa)
        else:
            vdict = coverage.get_value_dictionary(parameters or rdt.fields, temporal_slice=slice_)
        if not vdict:
            log.warning('Retrieve returning empty set')
            return rdt
        rdt[coverage.temporal_parameter_name] = vdict[coverage.temporal_parameter_name]
        for k, v in vdict.iteritems():
            if k == coverage.temporal_parameter_name:
                continue
            # The values come straight from the coverage, so they are known to
            # be valid and present; insert them into the RDT directly.
            rdt._rd[k] = v
            #rdt[k] = v

        return rdt
Code example #7
File: ion_telnet_server.py Project: daf/coi-services
	def __init__(self, input_callback=None):
		log.debug("TelnetServer.__init__()")
		if not input_callback:
			log.warning("TelnetServer.__init__(): callback not specified")
			raise ServerError("callback not specified")
		self.parent_input_callback = input_callback
		
		# TODO: get username and password dynamically
		self.username = '******'
		self.password = '******'
	
		# TODO: get ip_address & port number dynamically
		# TODO: ensure that port is not already in use
		self.port = self.PORT_RANGE_LOWER
		self.ip_address = 'localhost'
		#self.ip_address = '67.58.49.202'
			
		# create telnet server object and start the server process
		self.server_socket = socket.socket()
		# SO_REUSEADDR must be set via setsockopt on a raw socket; assigning
		# allow_reuse_address (a SocketServer class attribute) has no effect here
		self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		while True:
			try:
				self.server_socket.bind((self.ip_address, self.port))
				break
			except socket.error:
				self.port = self.port + 1
				log.debug("trying to bind to port " + str(self.port))
				if self.port > self.PORT_RANGE_UPPER:
					log.warning("TelnetServer.__init__(): no available ports for server")
					self.close_connection()
					return
		gevent.spawn(self.server_greenlet)
Code example #8
File: endpoint.py Project: swarbhanu/pyon
def log_message(prefix="MESSAGE", msg=None, headers=None, recv=None, delivery_tag=None, is_send=True):
    """
    Utility function to print a legible, comprehensive summary of a received message.
    @NOTE: This is an expensive operation
    """
    try:
        headers = headers or {}
        _sender = headers.get('sender', '?') + "(" + headers.get('sender-name', '') + ")"
        _send_hl, _recv_hl = ("###", "") if is_send else ("", "###")

        if recv and getattr(recv, '__iter__', False):
            recv = ".".join(str(item) for item in recv if item)
        _recv = headers.get('receiver', '?')
        _opstat = "op=%s"%headers.get('op', '') if 'op' in headers else "status=%s"%headers.get('status_code', '')
        try:
            import msgpack
            _msg = msgpack.unpackb(msg)
            _msg = str(_msg)
        except Exception:
            _msg = str(msg)
        _msg = _msg[0:400]+"..." if len(_msg) > 400 else _msg
        _delivery = "\nDELIVERY: tag=%s"%delivery_tag if delivery_tag else ""
        log.info("%s: %s%s%s -> %s%s%s %s:\nHEADERS: %s\nCONTENT: %s%s",
            prefix, _send_hl, _sender, _send_hl, _recv_hl, _recv, _recv_hl, _opstat, str(headers), _msg, _delivery)
    except Exception as ex:
        log.warning("%s log error: %s", prefix, str(ex))
Code example #10
def log_message(prefix="MESSAGE",
                msg=None,
                headers=None,
                recv=None,
                delivery_tag=None,
                is_send=True):
    """
    Utility function to print a legible, comprehensive summary of a received message.
    @NOTE: This is an expensive operation
    """
    try:
        headers = headers or {}
        _sender = headers.get('sender', '?') + "(" + headers.get(
            'sender-name', '') + ")"
        _send_hl, _recv_hl = ("###", "") if is_send else ("", "###")

        if recv and getattr(recv, '__iter__', False):
            recv = ".".join(str(item) for item in recv if item)
        _recv = headers.get('receiver', '?')
        _opstat = "op=%s" % headers.get(
            'op', '') if 'op' in headers else "status=%s" % headers.get(
                'status_code', '')
        try:
            import msgpack
            _msg = msgpack.unpackb(msg)
            _msg = str(_msg)
        except Exception:
            _msg = str(msg)
        _msg = _msg[0:400] + "..." if len(_msg) > 400 else _msg
        _delivery = "\nDELIVERY: tag=%s" % delivery_tag if delivery_tag else ""
        log.info("%s: %s%s%s -> %s%s%s %s:\nHEADERS: %s\nCONTENT: %s%s",
                 prefix, _send_hl, _sender, _send_hl, _recv_hl, _recv,
                 _recv_hl, _opstat, str(headers), _msg, _delivery)
    except Exception as ex:
        log.warning("%s log error: %s", prefix, str(ex))
Code example #11
File: service.py Project: edwardhunter/scioncc
 def build_service_map(self):
     """
     Adds all known service definitions to service registry.
     @todo: May be a bit fragile due to using BaseService.__subclasses__
     """
     for cls in BaseService.__subclasses__():
         assert hasattr(cls, 'name'), 'Service class must define name value. Service class in error: %s' % cls
         if cls.name:
             self.services_by_name[cls.name] = cls
             self.add_servicedef_entry(cls.name, "base", cls)
             try:
                 self.add_servicedef_entry(cls.name, "schema", json.loads(cls.SCHEMA_JSON))
             except Exception as ex:
                 log.exception("Cannot parse service schema " + cls.name)
             interfaces = list(implementedBy(cls))
             if interfaces:
                 self.add_servicedef_entry(cls.name, "interface", interfaces[0])
             if cls.__name__.startswith("Base"):
                 try:
                     client = "%s.%sProcessClient" % (cls.__module__, cls.__name__[4:])
                     self.add_servicedef_entry(cls.name, "client", named_any(client))
                     sclient = "%s.%sClient" % (cls.__module__, cls.__name__[4:])
                     self.add_servicedef_entry(cls.name, "simple_client", named_any(sclient))
                 except Exception as ex:
                     log.warning("Cannot find client for service %s" % cls.name)
Code example #12
 def build_service_map(self):
     """
     Adds all known service definitions to service registry.
     @todo: May be a bit fragile due to using BaseService.__subclasses__
     """
     for cls in BaseService.__subclasses__():
         assert hasattr(
             cls, 'name'
         ), 'Service class must define name value. Service class in error: %s' % cls
         if cls.name:
             self.services_by_name[cls.name] = cls
             self.add_servicedef_entry(cls.name, "base", cls)
             try:
                 self.add_servicedef_entry(cls.name, "schema",
                                           json.loads(cls.SCHEMA_JSON))
             except Exception as ex:
                 log.exception("Cannot parse service schema " + cls.name)
             interfaces = list(implementedBy(cls))
             if interfaces:
                 self.add_servicedef_entry(cls.name, "interface",
                                           interfaces[0])
             if cls.__name__.startswith("Base"):
                 try:
                     client = "%s.%sProcessClient" % (cls.__module__,
                                                      cls.__name__[4:])
                     self.add_servicedef_entry(cls.name, "client",
                                               named_any(client))
                     sclient = "%s.%sClient" % (cls.__module__,
                                                cls.__name__[4:])
                     self.add_servicedef_entry(cls.name, "simple_client",
                                               named_any(sclient))
                 except Exception as ex:
                     log.warning("Cannot find client for service %s" %
                                 (cls.name))
Code example #13
    def set_task(self, task_time, message):

        #------------------------------------------------------------------------------------
        # get the current time. Ex: datetime.datetime(2012, 7, 12, 14, 30, 6, 769776)
        #------------------------------------------------------------------------------------

        current_time = datetime.datetime.today()

        #------------------------------------------------------------------------------------
        # Calculate the time to wait
        #------------------------------------------------------------------------------------
        wait_time = datetime.timedelta(days=task_time.day - current_time.day,
                                       hours=task_time.hour - current_time.hour,
                                       minutes=task_time.minute - current_time.minute,
                                       seconds=task_time.second - current_time.second)

        log.info("Fake scheduler calculated wait_time = %s" % wait_time)

        seconds = wait_time.total_seconds()

        if seconds < 0:
            log.warning("Calculated wait time: %s seconds. Publishing immediately.", seconds)
            seconds = 0

        log.info("Total seconds of wait time = %s" % seconds)

        # this has to be replaced by something better
        gevent.sleep(seconds)

        self.event_publisher.publish_event(origin='Scheduler', description = message)
        log.info("Fake scheduler published a SchedulerEvent")
Code example #14
 def validate_compatibility(self, data_process_definition_id='', in_data_product_ids=None, out_data_product_ids=None, routes=None):
     '''
     Validates compatibility between input and output data products
     routes are in this form:
     { (in_data_product_id, out_data_product_id) : actor }
         if actor is None then the data process is assumed to use parameter functions.
         if actor is a TransformFunction, the validation is done at runtime
     '''
     if data_process_definition_id:
         input_stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=data_process_definition_id, predicate=PRED.hasInputStreamDefinition, id_only=True)
         output_stream_def_ids, _ = self.clients.resource_registry.find_objects(subject=data_process_definition_id, predicate=PRED.hasStreamDefinition, id_only=True)
         for in_data_product_id in in_data_product_ids:
             input_stream_def = self.stream_def_from_data_product(in_data_product_id)
             if input_stream_def not in input_stream_def_ids:
                 log.warning('Creating a data process with an unmatched stream definition input')
         for out_data_product_id in out_data_product_ids:
             output_stream_def = self.stream_def_from_data_product(out_data_product_id)
             if output_stream_def not in output_stream_def_ids:
                 log.warning('Creating a data process with an unmatched stream definition output')
     
     if not out_data_product_ids and data_process_definition_id:
         return True
     if len(out_data_product_ids) > 1 and not routes and not data_process_definition_id:
         raise BadRequest('Multiple output data products but no routes defined')
     if len(out_data_product_ids) == 1:
         return all(self._validator(i, out_data_product_ids[0]) for i in in_data_product_ids)
     elif len(out_data_product_ids) > 1:
         for in_dp_id, out in routes.iteritems():
             for out_dp_id, actor in out.iteritems():
                 if not self._validator(in_dp_id, out_dp_id):
                     return False
         return True
     else:
         raise BadRequest('No input data products specified')
Code example #15
    def on_start(self):
        '''
        Binds the publisher to the transform
        '''
        super(TransformStreamPublisher, self).on_start()
        self.stream_id = self.CFG.get_safe('process.stream_id', '')
        self.exchange_point = self.CFG.get_safe('process.exchange_point',
                                                'science_data')
        self.routing_key = self.CFG.get_safe('process.routing_key', '')

        # We do not want processes to make service calls.
        # A StreamPublisher has built-in behavior to create a stream if no
        # stream_id and route are specified; we use the container-attached
        # endpoints instead of making a new stream.
        if not (self.stream_id or self.routing_key):
            output_streams = copy(self.CFG.get_safe('process.publish_streams'))
            first_stream = output_streams.popitem()
            try:
                self.publisher = getattr(self, first_stream[0])
            except AttributeError:
                log.warning('no publisher endpoint located')
                self.publisher = None
        else:
            self.publisher = StreamPublisher(
                process=self,
                stream_id=self.stream_id,
                exchange_point=self.exchange_point,
                routing_key=self.routing_key)
Code example #16
File: messaging.py Project: j2project/pyon
    def channel(self, ch_type, transport=None):
        """
        Creates a Channel object with an underlying transport callback and returns it.

        @type ch_type   BaseChannel
        """
        #log.debug("NodeB.channel")
        with self._lock:
            # having _queue_auto_delete on is a pre-req to being able to pool.
            if ch_type == channel.BidirClientChannel and not ch_type._queue_auto_delete:

                # only attempt this 5 times - somewhat arbitrary but we can't have an infinite loop here
                attempts = 5
                while attempts > 0:
                    attempts -= 1

                    chid = self._pool.get_id()
                    if chid in self._bidir_pool:
                        log.debug("BidirClientChannel requested, pulling from pool (%d)", chid)
                        assert chid not in self._pool_map.values()

                        # we need to check the health of this bidir channel
                        ch = self._bidir_pool[chid]
                        if not self._check_pooled_channel_health(ch):
                            log.warning("Channel (%d) failed health check, removing from pool", ch.get_channel_id())

                            # return chid to the id pool
                            self._pool.release_id(chid)

                            # remove this channel from the pool, put into dead pool
                            self._dead_pool.append(ch)
                            del self._bidir_pool[chid]

                            # now close the channel (must remove our close callback which returns it to the pool)
                            assert ch._close_callback == self.on_channel_request_close
                            ch._close_callback = None
                            ch.close()

                            # resume the loop to attempt to get one again
                            continue

                        self._pool_map[ch.get_channel_id()] = chid
                    else:
                        log.debug("BidirClientChannel requested, no pool items available, creating new (%d)", chid)
                        ch = self._new_channel(ch_type, transport=transport)
                        ch.set_close_callback(self.on_channel_request_close)
                        self._bidir_pool[chid] = ch
                        self._pool_map[ch.get_channel_id()] = chid

                    # channel here is valid, exit out of attempts loop
                    break
                else:    # while loop didn't get a valid channel in X attempts
                    raise StandardError("Could not get a valid channel")

            else:
                ch = self._new_channel(ch_type, transport=transport)
            assert ch

        return ch
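
The _pool used above is assumed to hand out reusable integer ids via get_id()/release_id(); a minimal sketch of such a pool (an illustrative stand-in, not pyon's actual implementation):

    class SimpleIdPool(object):
        """Hands out small integer ids, reusing released ones first."""
        def __init__(self):
            self._free = []   # ids returned via release_id(), reused first
            self._next = 0    # high-water mark for fresh ids

        def get_id(self):
            if self._free:
                return self._free.pop()
            self._next += 1
            return self._next

        def release_id(self, an_id):
            self._free.append(an_id)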
Code example #17
    def create_dataset(self, dataset=None, parameter_dict=None, parameter_dictionary_id=''):
        
        if parameter_dict is not None:
            log.warning("Creating a parameter dictionary raw with coverage objects will soon be deprecated")
        
        if parameter_dictionary_id:
            parameter_dict = self._coverage_parameter_dictionary(parameter_dictionary_id)
            parameter_dict = parameter_dict.dump()  # Serialize it

        dataset.coverage_version = 'UNSET'
        dataset_id, rev = self.clients.resource_registry.create(dataset)
        try:
            if dataset.coverage_type == CoverageTypeEnum.SIMPLEX:
                cov = self._create_coverage(dataset_id, dataset.description or dataset_id, parameter_dict)
                self._save_coverage(cov)
                cov.close()
            elif dataset.coverage_type == CoverageTypeEnum.COMPLEX:
                cov = self._create_complex_coverage(dataset_id, dataset.description or dataset_id, parameter_dict)
                cov.close()
            else:
                raise BadRequest("Unknown Coverage Type")

        except Exception:
            # Clean up dangling resource if there's no coverage
            self.delete_dataset(dataset_id)
            raise

        dataset.coverage_version = "TODO"
        dataset._id = dataset_id
        dataset._rev = rev
        self.update_dataset(dataset)

        log.debug('creating dataset: %s', dataset_id)

        #table loader create resource
        if dataset.visibility == ResourceVisibilityEnum.PUBLIC:
            log.debug('dataset visible: %s', dataset_id)
            if self._get_eoi_service_available() and parameter_dictionary_id:

                params = self.read_parameter_contexts(parameter_dictionary_id)
                param_defs = {}

                for p in params:                
                    param_defs[p.name] = {
                        "value_encoding" : p.value_encoding,
                        "parameter_type" : p.parameter_type,
                        "units" : p.units,
                        "standard_name" : p.name,
                        "display_name" : p.display_name,
                        "description" : p.description,
                        "fill_value" : p.fill_value
                    }

                self._create_single_resource(dataset_id, param_defs)

        self.clients.resource_registry.create_association(dataset_id, PRED.hasParameterDictionary, parameter_dictionary_id)

        return dataset_id
Code example #18
    def create_stream(self, name='', exchange_point='', topic_ids=None, credentials=None, stream_definition_id='', description='', stream_name='', stream_type=''):
        # Argument Validation
        if name and self.clients.resource_registry.find_resources(restype=RT.Stream, name=name, id_only=True)[0]:
            raise Conflict("The named stream '%s' already exists on XP '%s'" % (name, exchange_point))
        validate_true(exchange_point, 'An exchange point must be specified')

        exchange_point_id = None
        if re.match(r'[0-9a-f]{32}', exchange_point):  # It's a uuid
            xp_obj = self.clients.exchange_management.read_exchange_point(exchange_point)
            exchange_point_id = exchange_point
            exchange_point = xp_obj.name
        else:
            self.container.ex_manager.create_xp(exchange_point)
            xp_objs, _ = self.clients.resource_registry.find_resources(restype=RT.ExchangePoint, name=exchange_point, id_only=True)
            if not xp_objs:
                raise BadRequest('failed to create an ExchangePoint: ' + exchange_point)
            exchange_point_id = xp_objs[0]

        topic_ids = topic_ids or []

        if not name:
            name = create_unique_identifier()

        # Get topic names and topics
        topic_names = []
        associated_topics = []
        for topic_id in topic_ids:
            topic = self.read_topic(topic_id)
            if topic.exchange_point == exchange_point:
                topic_names.append(self._sanitize(topic.name))
                associated_topics.append(topic_id)
            else:
                log.warning('Attempted to attach stream %s to topic %s with different exchange points', name, topic.name)

        stream = Stream(name=name, description=description)
        routing_key = '.'.join([self._sanitize(name)] + topic_names + ['stream'])
        if len(routing_key) > 255:
            raise BadRequest('Routing key exceeds the 255-character limit; too many topics')

        stream.stream_route.exchange_point = exchange_point
        stream.stream_route.routing_key = routing_key
        #@todo: validate credentials
        stream.stream_route.credentials = credentials
        stream.stream_name = stream_name
        stream.stream_type = stream_type

        stream_id, rev = self.clients.resource_registry.create(stream)

        self._associate_stream_with_exchange_point(stream_id, exchange_point_id)

        if stream_definition_id: #@Todo: what if the stream has no definition?!
            self._associate_stream_with_definition(stream_id, stream_definition_id)

        for topic_id in associated_topics:
            self._associate_topic_with_stream(topic_id, stream_id)

        log.info('Stream %s: %s', name, routing_key)

        return stream_id, stream.stream_route
Code example #19
    def __init__(self,
                 direct_access_type=None,
                 input_callback=None,
                 ip_address=None,
                 session_timeout=None,
                 inactivity_timeout=None):
        log.debug("DirectAccessServer.__init__()")

        if not direct_access_type:
            log.warning(
                "DirectAccessServer.__init__(): direct access type not specified"
            )
            raise ServerError(
                "DirectAccessServer.__init__(): direct access type not specified"
            )

        if not input_callback:
            log.warning(
                "DirectAccessServer.__init__(): callback not specified")
            raise ServerError(
                "DirectAccessServer.__init__(): callback not specified")

        if not ip_address:
            log.warning(
                "DirectAccessServer.__init__(): IP address not specified")
            raise ServerError(
                "DirectAccessServer.__init__(): IP address not specified")

        if not session_timeout:
            log.warning(
                "DirectAccessServer.__init__(): session timeout not specified")
            raise ServerError(
                "DirectAccessServer.__init__(): session timeout not specified")

        if not inactivity_timeout:
            log.warning(
                "DirectAccessServer.__init__(): inactivity timeout not specified"
            )
            raise ServerError(
                "DirectAccessServer.__init__(): inactivity timeout not specified"
            )

        # start the correct server based on direct_access_type
        if direct_access_type == DirectAccessTypes.telnet:
            self.server = TelnetServer(input_callback, ip_address)
        elif direct_access_type == DirectAccessTypes.vsp:
            self.server = SerialServer(input_callback, ip_address)
        else:
            raise ServerError(
                "DirectAccessServer.__init__(): Unsupported direct access type"
            )

        log.debug("DirectAccessServer.__init__(): starting timer greenlet")
        self.timer = gevent.spawn(self._timer_greenlet,
                                  session_timeout=session_timeout,
                                  inactivity_timeout=inactivity_timeout)
Code example #20
File: couchdb_datastore.py Project: wfrench/pyon
 def list_objects(self, datastore_name=""):
     if not datastore_name:
         datastore_name = self.datastore_name
     log.warning('Listing all objects in data store %s' % datastore_name)
     try:
         objs = [obj for obj in self.server[datastore_name]]
     except ValueError:
         raise BadRequest("Data store name %s invalid" % datastore_name)
     log.debug('Objects: %s' % str(objs))
     return objs
Code example #21
    def update_resource_access_policy(self, resource_id):
        if self.policy_decision_point_manager is not None:

            try:
                policy_rules = self.policy_client.get_active_resource_access_policy_rules(resource_id)
                self.policy_decision_point_manager.load_resource_policy_rules(resource_id, policy_rules)
            except NotFound:
                # If the resource does not exist, just ignore it - but log a warning.
                log.warning("The resource %s is not found, so can't apply access policy" % resource_id)
Code example #22
    def validate_compatibility(self,
                               data_process_definition_id='',
                               in_data_product_ids=None,
                               out_data_product_ids=None,
                               routes=None):
        '''
        Validates compatibility between input and output data products
        routes are in this form:
        { (in_data_product_id, out_data_product_id) : actor }
            if actor is None then the data process is assumed to use parameter functions.
            if actor is a TransformFunction, the validation is done at runtime
        '''
        if data_process_definition_id:
            input_stream_def_ids, _ = self.clients.resource_registry.find_objects(
                subject=data_process_definition_id,
                predicate=PRED.hasInputStreamDefinition,
                id_only=True)
            output_stream_def_ids, _ = self.clients.resource_registry.find_objects(
                subject=data_process_definition_id,
                predicate=PRED.hasStreamDefinition,
                id_only=True)
            for in_data_product_id in in_data_product_ids:
                input_stream_def = self.stream_def_from_data_product(
                    in_data_product_id)
                if input_stream_def not in input_stream_def_ids:
                    log.warning(
                        'Creating a data process with an unmatched stream definition input'
                    )
            for out_data_product_id in out_data_product_ids:
                output_stream_def = self.stream_def_from_data_product(
                    out_data_product_id)
                if output_stream_def not in output_stream_def_ids:
                    log.warning(
                        'Creating a data process with an unmatched stream definition output'
                    )

        if not out_data_product_ids and data_process_definition_id:
            return True
        if len(out_data_product_ids) > 1 and not routes and not data_process_definition_id:
            raise BadRequest(
                'Multiple output data products but no routes defined')
        if len(out_data_product_ids) == 1:
            return all([
                self._validator(i, out_data_product_ids[0])
                for i in in_data_product_ids
            ])
        elif len(out_data_product_ids) > 1:
            for in_dp_id, out in routes.iteritems():
                for out_dp_id, actor in out.iteritems():
                    if not self._validator(in_dp_id, out_dp_id):
                        return False
            return True
        else:
            raise BadRequest('No input data products specified')
Code example #23
def process_gateway_agent_request(resource_id, operation):

    try:

        if not resource_id:
            raise BadRequest("Am agent resource_id was not found in the URL")

        if operation == '':
            raise BadRequest("An agent operation was not specified in the URL")


        #Ensure there is no unicode
        resource_id = str(resource_id)
        operation = str(operation)

        #Retrieve json data from HTTP Post payload
        json_params = None
        if request.method == "POST":
            payload = request.form['payload']

            json_params = json_loads(str(payload))

            if 'agentRequest' not in json_params:
                raise Inconsistent("The JSON request is missing the 'agentRequest' key in the request")

            if 'agentId' not in json_params['agentRequest']:
                raise Inconsistent("The JSON request is missing the 'agentId' key in the request")

            if 'agentOp' not in json_params['agentRequest']:
                raise Inconsistent("The JSON request is missing the 'agentOp' key in the request")

            if json_params['agentRequest']['agentId'] != resource_id:
                raise Inconsistent("Target agent id in the JSON request (%s) does not match agent id in URL (%s)" % (str(json_params['agentRequest']['agentId']), resource_id) )

            if json_params['agentRequest']['agentOp'] != operation:
                raise Inconsistent("Target agent operation in the JSON request (%s) does not match agent operation in URL (%s)" % ( str(json_params['agentRequest']['agentOp']), operation ) )

        resource_agent = ResourceAgentClient(resource_id, node=Container.instance.node, process=service_gateway_instance)

        param_list = create_parameter_list('agentRequest', 'resource_agent', ResourceAgentProcessClient, operation, json_params)

        #Validate requesting user and expiry and add governance headers
        ion_actor_id, expiry = get_governance_info_from_request('agentRequest', json_params)
        ion_actor_id, expiry = validate_request(ion_actor_id, expiry)
        param_list['headers'] = build_message_headers(ion_actor_id, expiry)

        methodToCall = getattr(resource_agent, operation)
        result = methodToCall(**param_list)

        return gateway_json_response(result)

    except Exception as e:
        if isinstance(e, NotFound):
            log.warning('The agent instance for id %s is not found.' % resource_id)
        return build_error_response(e)
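
A hedged sketch of the 'payload' form field the handler above expects; the structure follows the validation checks, while the id and operation values are illustrative:

    # posted as the 'payload' form field of the HTTP request
    payload = {
        'agentRequest': {
            'agentId': '1f2e3d4c...',        # must equal the resource_id in the URL
            'agentOp': 'get_capabilities',   # must equal the operation in the URL
            # operation arguments, if any, would follow here
        }
    }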
Code example #24
    def _coverage_to_granule(cls, coverage, start_time=None, end_time=None, stride_time=None, fuzzy_stride=True, parameters=None, stream_def_id=None, tdoa=None):
        slice_ = slice(None) # Defaults to all values


        # Validations
        if start_time is not None:
            validate_is_instance(start_time, Number, 'start_time must be a number for striding.')
        if end_time is not None:
            validate_is_instance(end_time, Number, 'end_time must be a number for striding.')
        if stride_time is not None:
            validate_is_instance(stride_time, Number, 'stride_time must be a number for striding.')

        if tdoa is not None and isinstance(tdoa,slice):
            slice_ = tdoa
        
        elif stride_time is not None and not fuzzy_stride:  # SLOW
            ugly_range = np.arange(start_time, end_time, stride_time)
            idx_values = [cls.get_time_idx(coverage, i) for i in ugly_range]
            idx_values = list(set(idx_values))  # dedupe; set() scrambles order, so sort next
            idx_values.sort()
            slice_ = [idx_values]


        elif not (start_time is None and end_time is None):
            if start_time is not None:
                start_time = cls.get_time_idx(coverage, start_time)
            if end_time is not None:
                end_time = cls.get_time_idx(coverage, end_time)

            slice_ = slice(start_time, end_time, stride_time)
            log.info('Slice: %s', slice_)

        if stream_def_id:
            rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        else:
            rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
        if parameters is not None:
            # TODO: Improve efficiency here
            fields = list(set(parameters).intersection(rdt.fields))
        else:
            fields = rdt.fields

        # slice_ may be a list (fuzzy-stride branch above), which has no .start
        if isinstance(slice_, slice) and slice_.start == slice_.stop and slice_.start is not None:
            log.warning('Requested empty set of data.  %s', slice_)
            return rdt
        
        # Do time first
        tname = coverage.temporal_parameter_name
        cls.map_cov_rdt(coverage, rdt, tname, slice_)

        for field in fields:
            if field == tname:
                continue
            cls.map_cov_rdt(coverage, rdt, field, slice_)
        return rdt
Code example #26
 def register_dataset(self, data_product_id=''):
     procs, _ = self.clients.resource_registry.find_resources(restype=RT.Process, id_only=True)
     pid = None
     for p in procs:
         if 'registration_worker' in p:
             pid = p
     if not pid:
         log.warning('No registration worker found')
         return
     rpc_cli = RPCClient(to_name=pid)
     rpc_cli.request({'data_product_id': data_product_id}, op='register_dap_dataset')
Code example #27
File: event.py Project: daf/pyon
 def start(self):
     """
     Pass in a subscriber here, this will make it listen in a background greenlet.
     """
     assert not self._cbthread, "start called twice on EventSubscriber"
     gl = spawn(self.listen)
     self._cbthread = gl
     if not self._ready_event.wait(timeout=5):
         log.warning('EventSubscriber start timed out.')
     log.info("EventSubscriber started. Event pattern=%s" % self.binding)
     return gl
Code example #28
File: couchdb_datastore.py Project: blazetopher/pyon
    def find_doc(self, criteria=[], datastore_name=""):
        if not datastore_name:
            datastore_name = self.datastore_name
        try:
            db = self.server[datastore_name]
        except ValueError:
            raise BadRequest("Data store name %s is invalid" % datastore_name)

        if len(criteria) == 0:
            # Return set of all objects indexed by doc id
            map_fun =\
'''function(doc) {
    emit(doc._id, doc);
}'''
        else:
            map_fun =\
'''function(doc) {
    if ('''
            for criterion in criteria:
                if isinstance(criterion, list):
                    if len(criterion) != 3:
                        raise BadRequest("Insufficient criterion values specified.  Much match [<field>, <logical constant>, <value>]")
                    for item in criterion:
                        if not isinstance(item, str):
                            raise BadRequest("All criterion values must be strings")
                    map_fun += "doc." + criterion[0]
                    map_fun += " " + criterion[1] + " "
                    map_fun += "\"" + criterion[2] + "\""
                else:
                    if criterion == DataStore.AND:
                        map_fun += ' && '
                    else:
                        map_fun += ' || '

            map_fun +=\
''') {
        emit(doc._id, doc);
    }
}'''

        log.debug("map_fun: %s" % str(map_fun))
        try:
            queryList = list(db.query(map_fun))
        except ResourceNotFound:
            raise NotFound("Data store query for criteria %s failed" % str(criteria))
        if len(queryList) == 0:
            raise NotFound("Data store query for criteria %s returned no objects" % str(criteria))
        results = [row.value for row in queryList]

        log.debug('Find results: %s' % str(results))
        log.warning('Find is an expensive debug only function. Use a specific find function instead.')
        return results
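
A hedged usage sketch of the criteria format accepted above: [<field>, <operator>, <value>] triples joined by DataStore.AND / DataStore.OR; the field names and values are illustrative:

    criteria = [["type_", "==", "UserInfo"],
                DataStore.AND,
                ["lcstate", "==", "DEPLOYED"]]
    # docs = datastore.find_doc(criteria, "resources")  # hypothetical call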
Code example #29
 def receive_callback(self, *a, **b):
     """ wrapper to capture errors """
     try:
         self.recv_packet(*a, **b)
     except:
         # try to get the individual transform's logger
         try:
             log = logging.getLogger(self.__class__.__module__)
         except:
             pass
         # but settle for ion.core.process.transform's logger if you have to
         log.warning('transform %s failed to handle message', self.id, exc_info=True)
         raise
Code example #30
 def _write(self, text):
     log.debug("TcpServer._write(): text = " + str(text))
     if self.connection_socket:
         self.activity_seen = True
         MSGLEN = len(text)
         total_sent = 0
         while total_sent < MSGLEN:
             sent = self.connection_socket.send(text[total_sent:])
             if sent == 0:
                 raise RuntimeError("socket connection broken")
             total_sent = total_sent + sent
     else:
         log.warning("TcpServer._write(): no connection yet, can not write text")            
Code example #31
 def _write(self, text):
     log.debug("TcpServer._write(): text = " + str(text))
     if self.connection_socket:
         self.activity_seen = True
         MSGLEN = len(text)
         total_sent = 0
         while total_sent < MSGLEN:
             sent = self.connection_socket.send(text[total_sent:])
             if sent == 0:
                 raise RuntimeError("socket connection broken")
             total_sent = total_sent + sent
     else:
         log.warning(
             "TcpServer._write(): no connection yet, cannot write text")
Code example #32
 def _write(self, data):
     # write data to tcp client
     log.debug("TcpServer._write(): data = " + str(data))
     if self.connection_socket:
         self.activity_seen = True
         MSGLEN = len(data)
         total_sent = 0
         while total_sent < MSGLEN:
             sent = self.connection_socket.send(data[total_sent:])
             if sent == 0:
                 raise RuntimeError("socket connection broken")
             total_sent = total_sent + sent
     else:
         log.warning("TcpServer._write(): no connection yet, can not write data")            
Code example #34
    def update_resource_access_policy(self, resource_id):
        if self.policy_decision_point_manager is not None:

            try:
                policy_rules = self.policy_client.get_active_resource_access_policy_rules(
                    resource_id)
                self.policy_decision_point_manager.load_resource_policy_rules(
                    resource_id, policy_rules)
            except NotFound:
                # If the resource does not exist, just ignore it - but log a warning.
                log.warning(
                    "The resource %s is not found, so can't apply access policy"
                    % resource_id)
Code example #35
    def get_coverage_parameter(cls, parameter_context):
        """
        Creates a Coverage Model based Parameter Context given the 
        ParameterContext IonObject.

        Note: If the parameter is a parameter function and depends on dynamically
        created calibrations, this will fail.
        """
        # Only CF and netCDF compliant variable names
        parameter_context.name = re.sub(r'[^a-zA-Z0-9_]', '_',
                                        parameter_context.name)
        from ion.services.dm.utility.types import TypesManager
        # The TypesManager does all the parsing and converting to the coverage model instances
        tm = TypesManager(None, {}, {})
        # First thing to do is create the parameter type
        param_type = tm.get_parameter_type(
            parameter_context.parameter_type, parameter_context.value_encoding,
            parameter_context.code_report,
            parameter_context.parameter_function_id,
            parameter_context.parameter_function_map, {
                'name': parameter_context.name,
                'target_dataset': parameter_context.target_dataset,
                'target_name': parameter_context.target_name
            })
        # This block mirrors the parameter-context loading section in
        # ion/processes/bootstrap/ion_loader.py
        context = ParameterContext(name=parameter_context.name,
                                   param_type=param_type)
        # Now copy over all the attrs
        context.uom = parameter_context.units
        try:
            if isinstance(context.uom, basestring):
                tm.get_unit(context.uom)
        except UdunitsError:
            log.warning('Parameter %s has invalid units: %s',
                        parameter_context.name, context.uom)
        # Fill values can be a bit tricky...
        context.fill_value = tm.get_fill_value(
            parameter_context.fill_value, parameter_context.value_encoding,
            param_type)
        context.reference_urls = parameter_context.reference_urls
        context.internal_name = parameter_context.name
        context.display_name = parameter_context.display_name
        context.standard_name = parameter_context.standard_name
        context.ooi_short_name = parameter_context.ooi_short_name
        context.description = parameter_context.description
        context.precision = parameter_context.precision
        context.visible = parameter_context.visible
        return context
Code example #36
def lrt_parser(document):
    sio = StringIO()
    sio.write(document)
    sio.seek(0)

    with ZipFile(sio) as zp:
        files = zp.namelist()

        tables = {}
        for f in files:
            if os.path.basename(f) != 'master.csv':
                with zp.open(f) as t:
                    headers = reader(t).next()
                    data = np.genfromtxt(t, delimiter=',')

                datlimz = data[:, :-2]
                datlim = data[:, -2:]

                tables[os.path.basename(f)] = {
                    'datlim': datlim,
                    'datlimz': datlimz,
                    'dims': headers[:-2]
                }

            else:
                master = f

        # open the master.csv entry recorded above; using the loop variable f
        # here would open whichever entry happened to come last in the archive
        with zp.open(master) as master_file:
            dr = DictReader(master_file)

            for row in dr:
                key = '_'.join(
                    ['lrt', row['Reference Designator'], row['Data Product']])

                doc = {}
                doc['array'] = row['Array']
                doc['instrument_class'] = row['Instrument Class']
                doc['reference_designator'] = row['Reference Designator']
                doc['data_product'] = row['Data Product']
                lookup_key = row['Lookup Table ID']
                if lookup_key not in tables:
                    log.warning('Table not found while parsing LRT file: %s',
                                lookup_key)
                    continue
                doc['datlim'] = tables[lookup_key]['datlim'].tolist()
                doc['datlimz'] = tables[lookup_key]['datlimz'].tolist()
                doc['dims'] = tables[lookup_key]['dims']
                yield key, doc
    return
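
A hedged usage sketch: lrt_parser() is a generator yielding (key, doc) pairs, and the file name below is a stand-in for wherever the raw zip document actually comes from:

    with open('lookup_tables.zip', 'rb') as fh:  # hypothetical file
        document = fh.read()
    for key, doc in lrt_parser(document):
        log.debug('%s: %s', key, doc['dims'])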
Code example #37
 def receive_callback(self, *a, **b):
     """ wrapper to capture errors """
     try:
         self.recv_packet(*a, **b)
     except:
         # try to get the individual transform's logger
         try:
             log = logging.getLogger(self.__class__.__module__)
         except:
             pass
         # but settle for ion.core.process.transform's logger if you have to
         log.warning('transform %s failed to handle message',
                     self.id,
                     exc_info=True)
         raise
Code example #38
    def _extract_parameters_from_stream(self, msg, field):

        tokens = msg.split(" ")

        try:
            # enumerate() yields the position of every token, so each "=" is
            # checked against its own neighbors (tokens.index() would always
            # return the position of the first "=" in the message)
            for i, token in enumerate(tokens):
                if token.strip() == "=" and tokens[i - 1] == field:
                    return int(tokens[i + 1].strip())
        except IndexError:
            log.warning("Could not extract value from the message. Please check its format.")

        return self.value
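
A hedged sketch of the message format the parser above expects: whitespace-separated "field = value" groups; the message text is illustrative:

    msg = "battery = 87 temperature = 21"
    # self._extract_parameters_from_stream(msg, "temperature")  -> returns 21
    # an unknown field falls through to the default, self.value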
Code example #39
    def _get_lookup_value(self, lookup_value):
        if not self.new_lookups.empty():
            new_values = self.new_lookups.get()
            self.lookup_docs = new_values + self.lookup_docs

        lookup_value_document_keys = self.lookup_docs
        for key in lookup_value_document_keys:
            try:
                document = self.stored_values.read_value(key)
                if lookup_value in document:
                    return document[lookup_value]
            except NotFound:
                log.warning('Specified lookup document does not exist')

        return None
Code example #40
File: service.py Project: ateranishi/pyon
    def load_service_mods(cls, path):
        import pkgutil
        mod_prefix = path.replace("/", ".")

        for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
            if is_pkg:
                cls.load_service_mods(path + "/" + mod_name)
            else:
                mod_qual = "%s.%s" % (mod_prefix, mod_name)
                #print "Import", mod_qual
                try:
                    named_any(mod_qual)
                except Exception as ex:
                    log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
Code example #41
File: service.py Project: swarbhanu/pyon
    def load_service_mods(cls, path):
        import pkgutil
        mod_prefix = path.replace("/", ".")

        for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
            if is_pkg:
                cls.load_service_mods(path+"/"+mod_name)
            else:
                mod_qual = "%s.%s" % (mod_prefix, mod_name)
                #print "Import", mod_qual
                try:
                    named_any(mod_qual)
                except Exception as ex:
                    log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
Code example #43
    def __init__(self, direct_access_type=None, input_callback=None):
        log.debug("DirectAccessServer.__init__()")

        if not direct_access_type:
            log.warning("DirectAccessServer.__init__(): direct access type not specified")
            raise ServerError("direct access type not specified")

        if not input_callback:
            log.warning("DirectAccessServer.__init__(): callback not specified")
            raise ServerError("callback not specified")
               
        # start the correct server based on direct_access_type
        if direct_access_type == DirectAccessTypes.telnet:
            self.server = TelnetServer(input_callback)
        else:
            raise ServerError("Unsupported direct access type")
Code example #44
    def register_dataset(self, dataset_id='', external_data_product_name=''):
        dataset_obj = self.read_dataset(dataset_id)
        dataset_obj.registered = True
        self.update_dataset(dataset=dataset_obj)
        external_data_product_name = external_data_product_name or dataset_obj.name

        procs, _ = self.clients.resource_registry.find_resources(restype=RT.Process, id_only=True)
        pid = None
        for p in procs:
            if 'registration_worker' in p:
                pid = p
        if not pid:
            log.warning('No registration worker found')
            return
        rpc_cli = RPCClient(to_name=pid)
        rpc_cli.request({'dataset_id': dataset_id, 'data_product_name': external_data_product_name}, op='register_dap_dataset')
Code example #45
File: service.py Project: edwardhunter/scioncc
    def load_service_mods(cls, path, package=""):
        if isinstance(path, ModuleType):
            for p in path.__path__:
                cls.load_service_mods(p, path.__name__)
            return

        import pkgutil
        for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
            if is_pkg:
                cls.load_service_mods(path + "/" + mod_name, package + "." + mod_name)
            else:
                mod_qual = "%s.%s" % (package, mod_name)
                try:
                    named_any(mod_qual)
                except Exception as ex:
                    log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
Code example #46
        def reload_user_info(event_msg, headers):
            '''
            Callback method for the subscriber to ReloadUserInfoEvent
            '''

            notification_id = event_msg.notification_id
            log.debug("(UNS instance) Received a ReloadNotificationEvent. The relevant notification_id is %s" % notification_id)

            try:
                self.user_info = self.load_user_info()
            except NotFound:
                log.warning("ElasticSearch has not yet loaded the user_index.")

            self.reverse_user_info = calculate_reverse_user_info(self.user_info)

            log.debug("(UNS instance) After a reload, the user_info: %s", self.user_info)
            log.debug("(UNS instance) The recalculated reverse_user_info: %s", self.reverse_user_info)
Code Example #47
    def start_app_from_url(self, app_url="", config=None):
        """
        @brief Read the app file and call start_app
        """
        log.debug("AppManager.start_app_from_url(app_url=%s) ..." % app_url)

        try:
            app = Config([app_url]).data
            self.start_app(appdef=app, config=config)
            return True
        except ConfigNotFound:
            log.warning("Could not find container app file '%s'", app_url)
        except Exception as ex:
            log.exception("Could not start app file '%s'", app_url)
            raise ContainerAppError(ex.message)

        return False
Code Example #48
    def load_user_info(self):
        """
        Method to load the user info dictionary used by the notification workers and the UNS

        @retval user_info dict
        """
        users, _ = self.clients.resource_registry.find_resources(
            restype=RT.UserInfo)

        user_info = {}

        if not users:
            return {}

        for user in users:
            notifications_disabled = False
            notifications_daily_digest = False

            # retrieve all the active notifications associated with this user
            notifications = self.get_user_notifications(user_info_id=user)
            log.debug('load_user_info notifications: %s', notifications)

            for variable in user.variables:
                if isinstance(variable, dict) and 'name' in variable:

                    if variable['name'] == 'notifications_daily_digest':
                        notifications_daily_digest = variable['value']

                    if variable['name'] == 'notifications_disabled':
                        notifications_disabled = variable['value']

                else:
                    log.warning(
                        'Invalid variables attribute on UserInfo instance. UserInfo: %s',
                        user)

            user_info[user._id] = {
                'user_contact': user.contact,
                'notifications': notifications,
                'notifications_daily_digest': notifications_daily_digest,
                'notifications_disabled': notifications_disabled
            }

        return user_info
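
The returned dictionary maps each user resource id to that user's contact object, active notifications, and digest/disabled flags. An illustrative (placeholder) instance:

user_info = {
    'user_id_123': {                                  # user._id
        'user_contact': 'ContactInformation(...)',    # user.contact object
        'notifications': ['NotificationRequest_1'],   # active notifications
        'notifications_daily_digest': True,
        'notifications_disabled': False,
    },
}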
Code Example #49
    def start_rel_from_url(self, rel_url="", config=None):
        """
        @brief Read the rel file and call start_rel
        """
        log.info("AppManager.start_rel_from_url(rel_url=%s) ...", rel_url)

        try:
            rel = Config([rel_url]).data
            self.start_rel(rel, config)
            log.debug("AppManager.start_rel_from_url(rel_url=%s) done, OK.", rel_url)
            return True
        except ConfigNotFound:
            log.warning("Could not find container deploy file '%s'", rel_url)
        except Exception as ex:
            log.exception("Could not start container deploy file '%s'", rel_url)
            raise ContainerAppError(ex.message)

        return False
Code Example #50
    def _cov2granule(cls, coverage, start_time=None, end_time=None, stride_time=None, stream_def_id=None, parameters=None, tdoa=None, sort_parameter=None):
        # Convert Unix timestamps to the NTP epoch (1900-01-01), which
        # precedes the Unix epoch (1970-01-01) by 2208988800 seconds
        if start_time:
            start_time += 2208988800
        if end_time:
            end_time += 2208988800

        if tdoa is None:
            if start_time is None and end_time is None:
                data_dict = coverage.get_parameter_values(param_names=parameters, stride_length=stride_time, fill_empty_params=True, sort_parameter=sort_parameter).get_data()
            else:
                data_dict = coverage.get_parameter_values(param_names=parameters, time_segment=(start_time, end_time), stride_length=stride_time, fill_empty_params=True, sort_parameter=sort_parameter).get_data()
        elif isinstance(tdoa, slice):
            log.warning("Using tdoa argument on large datasets can consume too much memory")
            data_dict = coverage.get_parameter_values(param_names=parameters, fill_empty_params=True).get_data()
            data_dict = { k : v[tdoa] for k,v in data_dict.iteritems() }
        else:
            raise TypeError("tdoa is incorrect type: %s" % type(tdoa))

        return cls._data_dict_to_rdt(data_dict, stream_def_id, coverage)
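
The magic constant 2208988800 is the number of seconds by which the NTP epoch (1900-01-01) precedes the Unix epoch (1970-01-01); a quick stdlib check:

from datetime import datetime

# 70 years, 17 of them leap years: (70 * 365 + 17) * 86400 seconds
offset = (datetime(1970, 1, 1) - datetime(1900, 1, 1)).total_seconds()
assert offset == 2208988800.0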
Code Example #51
    def delete_subscription(self, subscription_id=''):
        if self.subscription_is_active(subscription_id):
            raise BadRequest('Clients can not delete an active subscription.')

        xn_objs, assocs = self.clients.resource_registry.find_subjects(
            object=subscription_id,
            predicate=PRED.hasSubscription,
            id_only=False)
        if len(xn_objs) > 1:
            log.warning('Subscription %s was attached to multiple queues', subscription_id)
        self._deassociate_subscription(subscription_id)

        for xn_obj in xn_objs:
            subscriptions, assocs = self.clients.resource_registry.find_objects(
                subject=xn_obj, predicate=PRED.hasSubscription, id_only=True)
            if not subscriptions:
                self.clients.exchange_management.undeclare_exchange_name(
                    xn_obj._id)

        self.clients.resource_registry.delete(subscription_id)
        return True
Code Example #52
File: service.py Project: swarbhanu/pyon
    def add_servicedef_entry(self, name, key, value, append=False):
        if not name:
            #log.warning("No name for key=%s, value=%s" % (key, value))
            return

        if name not in self.services:
            svc_def = IonServiceDefinition(name)
            self.services[name] = svc_def
        else:
            svc_def = self.services[name]

        oldvalue = getattr(svc_def, key, None)
        if oldvalue is not None:
            if append:
                assert type(oldvalue) is list, "Cannot append to non-list: %s" % oldvalue
                oldvalue.append(value)
            else:
                log.warning("Service %s, key=%s exists. Old=%s, new=%s" % (name, key, getattr(svc_def, key), value))

        if not append:
            setattr(svc_def, key, value)
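
The append-or-overwrite rule in miniature: append only works when the existing attribute is already a list, otherwise a non-append call overwrites the old value (SvcDef and set_entry are illustrative names, not the real pyon API):

class SvcDef(object):
    pass

def set_entry(svc_def, key, value, append=False):
    oldvalue = getattr(svc_def, key, None)
    if oldvalue is not None:
        if append:
            assert isinstance(oldvalue, list), "Cannot append to non-list"
            oldvalue.append(value)
        else:
            print("overwriting %s: %r -> %r" % (key, oldvalue, value))
    if not append:
        setattr(svc_def, key, value)

svc = SvcDef()
set_entry(svc, 'operations', ['read'])                # first assignment
set_entry(svc, 'operations', 'create', append=True)   # appends in place
print(svc.operations)                                  # ['read', 'create']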
Code Example #53
    def __getitem__(self, name):
        """
        Get an item by nick name from the record dictionary.
        """
        if self._available_fields and name not in self._available_fields:
            raise KeyError(name)
        if self._rd[name] is not None:
            context = self._pdict.get_context(name)
            if isinstance(context.param_type, ParameterFunctionType):
                return self._rd[name].memoized_values[:]
            return self._rd[name][:]
        ptype = self._pdict.get_context(name).param_type
        if isinstance(ptype, ParameterFunctionType):
            try:
                pfv = get_value_class(ptype, self.domain)
                pfv._pval_callback = self._pval_callback
                return pfv[:]
            except Exception as e:
                log.warning(e.message)
                return None
        return None
Code Example #54
    def _build_arg_map(self, name, ptype, return_dict=False):
        # get the arg list
        arg_list = ptype.function.arg_list
        # the map
        arg_map = ptype.function.param_map
        # get the arrays for each
        array_map = {}
        for k, v in arg_map.iteritems():
            if isinstance(v, basestring):

                array_value = self[v]
                if array_value is None:
                    log.warning("Missing inputs for parameter function %s",
                                name)
                    return None
                array_map[k] = array_value
            else:
                array_map[k] = v

        if return_dict:
            return array_map

        return [array_map[i] for i in arg_list]
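
The mapping idea, reduced to a self-contained sketch: names in param_map are resolved against a record (a plain dict here), while literal values pass through unchanged (build_args is an illustrative name):

def build_args(arg_list, param_map, record):
    array_map = {}
    for arg, source in param_map.items():
        if isinstance(source, str):
            value = record.get(source)
            if value is None:
                return None  # missing input; mirrors the log-and-bail above
            array_map[arg] = value
        else:
            array_map[arg] = source  # literal argument, e.g. a constant
    return [array_map[a] for a in arg_list]

record = {'temp': [10.1, 10.4], 'pressure': [3.2, 3.3]}
print(build_args(['t', 'p', 'k'],
                 {'t': 'temp', 'p': 'pressure', 'k': 273.15},
                 record))
# [[10.1, 10.4], [3.2, 3.3], 273.15]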
Code Example #55
    def _data_dict_to_rdt(cls, data_dict, stream_def_id, coverage):
        if stream_def_id:
            rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        else:
            rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
        if not data_dict or ('time' in data_dict and data_dict['time'].shape[0] == 0):
            log.warning('Retrieve returning empty set')
            return rdt

        rdt[coverage.temporal_parameter_name] = data_dict[coverage.temporal_parameter_name]
        for field in rdt.fields:
            if field == coverage.temporal_parameter_name:
                continue
            # These values came out of a coverage, so they are already
            # validated and can be written into the raw dict directly.
            if field in data_dict:
                rdt._rd[field] = data_dict[field]

        return rdt
Code Example #56
def process_oms_event():
    if not request.data:
        log.warning('process_oms_event: invalid OMS event payload: %r',
                    request.data)
        return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    payload = json_loads(str(request.data))
    if not isinstance(payload, list):
        log.warning(
            'process_oms_event: invalid OMS event payload: '
            'expecting array but got: %r', payload)
        return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    log.debug('process_oms_event: payload=%s', payload)

    event_publisher = EventPublisher()

    for obj in payload:
        for k in ['event_id', 'platform_id', 'message']:
            if k not in obj:
                log.warning(
                    'process_oms_event: invalid OMS event: %r missing. '
                    'Received object: %s', k, obj)
                #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

        # note that the external event_id is captured in the sub_type field:
        evt = dict(event_type='OMSDeviceStatusEvent',
                   origin_type='OMS Platform',
                   origin=obj.get('platform_id', 'platform_id NOT PROVIDED'),
                   sub_type=obj.get('event_id', 'event_id NOT PROVIDED'),
                   description=obj.get('message', ''),
                   status_details=obj)
        try:
            event_publisher.publish_event(**evt)
            log.debug('process_oms_event: published: %s', evt)
        except Exception:
            log.exception('process_oms_event: could not publish OMS event: %s',
                          evt)

    return gateway_json_response(OMS_ACCEPTED_RESPONSE)
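
A request body that passes the checks above is a JSON array of objects, each carrying event_id, platform_id, and message (all values below are invented):

example_payload = [
    {
        "event_id": "44.78",
        "platform_id": "Node1A",
        "message": "low battery voltage",
        "timestamp": 3578265811.422,  # extra fields ride along in status_details
    },
]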
Code Example #57
    def __init__(self, input_callback=None, ip_address=None):
        log.debug("TcpServer.__init__(): IP address = %s" % ip_address)

        # save callback if specified
        if not input_callback:
            log.warning("TcpServer.__init__(): callback not specified")
            raise ServerError("TcpServer.__init__(): callback not specified")
        self.parent_input_callback = input_callback

        # save ip address if specified
        if not ip_address:
            log.warning("TcpServer.__init__(): IP address not specified")
            raise ServerError("TcpServer.__init__(): IP address not specified")
        self.ip_address = ip_address

        # search for available port
        self.port = self.PORT_RANGE_LOWER
        # create a TCP socket; allow address reuse so restarts can rebind
        self.server_socket = gevent.socket.socket()
        self.server_socket.setsockopt(gevent.socket.SOL_SOCKET,
                                      gevent.socket.SO_REUSEADDR, 1)
        while True:
            try:
                log.debug("trying to bind to port %s on %s",
                          self.port, self.ip_address)
                self.server_socket.bind((self.ip_address, self.port))
                break
            except Exception as ex:
                log.debug("exception caught for socket bind: %s", ex)
                self.port += 1
                if self.port > self.PORT_RANGE_UPPER:
                    log.warning(
                        "TcpServer.__init__(): no available ports for server")
                    raise ServerError(
                        "TcpServer.__init__(): no available ports")

        # create token
        self.token = str(uuid.uuid4()).upper()

        log.debug("TcpServer.__init__(): starting server greenlet")
        self.server = gevent.spawn(self._server_greenlet)
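
The port-search loop reduces to a small stdlib-only sketch (bind_first_free is an illustrative name, not part of the codebase):

import socket

def bind_first_free(ip_address, lower, upper):
    """Bind to the first free TCP port in [lower, upper], as above."""
    sock = socket.socket()
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    for port in range(lower, upper + 1):
        try:
            sock.bind((ip_address, port))
            return sock, port
        except socket.error:
            continue  # port taken; try the next one
    sock.close()
    raise RuntimeError("no available ports in %s-%s" % (lower, upper))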