def _start_driver(self, dvr_config):
     try:
         self.set_configuration(dvr_config)
     except:
         log.error('error in configuration', exc_info=True)
         raise
     self._dvr_client = self
Example #2
 def run(self):
     try:
         while not self._shutdown_now.is_set():
             self._check_condition()
             self._shutdown_now.wait(self.polling_interval)
     except:
         log.error('thread failed', exc_info=True)
Example #3
    def _build_parsed_values(self):
        """
        Take the clock data and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            minutes, seconds, day, hour, year, month, _ = struct.unpack(
                '<6B2s', self.raw_data)
        except Exception as e:
            log.error('Error creating particle clock data raw data: %r',
                      self.raw_data)
            raise SampleException(e)

        minutes = int('%02x' % minutes)
        seconds = int('%02x' % seconds)
        day = int('%02x' % day)
        hour = int('%02x' % hour)
        year = int('%02x' % year)
        month = int('%02x' % month)

        result = [{
            VID: NortekEngClockDataParticleKey.DATE_TIME_ARRAY,
            VAL: [minutes, seconds, day, hour, year, month]
        }]

        log.debug('NortekEngClockDataParticle: particle=%r', result)
        return result
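A note on the int('%02x' % x) pattern above: the instrument appears to store each clock field as a binary-coded-decimal byte, so formatting the byte as two hex digits and re-reading them as decimal recovers the value. A minimal sketch with an invented raw record (the '<6B2s' layout comes from the snippet above; the byte values are hypothetical):

import struct

def decode_bcd(byte_value):
    # each nibble carries one decimal digit, e.g. 0x45 -> 45
    return int('%02x' % byte_value)

# hypothetical raw clock record: minutes, seconds, day, hour, year, month + 2 trailer bytes
raw = bytes([0x30, 0x45, 0x17, 0x09, 0x24, 0x06]) + b'\x00\x00'
fields = struct.unpack('<6B2s', raw)[:-1]
print([decode_bcd(b) for b in fields])   # [30, 45, 17, 9, 24, 6]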
Example #4
    def set_configuration(self, config):
        log.warn("DRIVER: set_configuration")
        """
        expect configuration to have:
        - parser module/class
        - directory, wildcard to find data files
        - optional timestamp of last granule
        - optional poll rate
        - publish info
        """
        log.error("Log level: %s", log.getEffectiveLevel())
        log.debug('using configuration: %s', config)
        self.config = config
        self.max_records = get_safe(config, 'max_records', 100)
        self.stream_config = self.CFG.get('stream_config', {})
        # one or more streams may be configured; either way, use the first entry
        if self.stream_config:
            stream_cfg = list(self.stream_config.values())[0]

        stream_id = stream_cfg['stream_id']
        stream_route = IonObject(OT.StreamRoute, routing_key=stream_cfg['routing_key'], exchange_point=stream_cfg['exchange_point'])
        param_dict = stream_cfg['stream_def_dict']['parameter_dictionary']
        self.publisher = StandaloneStreamPublisher(stream_id=stream_id, stream_route=stream_route)
        self.parameter_dictionary = ParameterDictionary.load(param_dict)
        self.time_field = self.parameter_dictionary.get_temporal_context()
        self.latest_granule_time = get_safe(config, 'last_time', 0)
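set_configuration reads a single entry out of self.CFG['stream_config']. A hypothetical entry carrying the keys it accesses might look like the sketch below; the field values are placeholders, not taken from the original code.

# hypothetical stream_config entry; only the keys read by set_configuration are shown
stream_config = {
    'parsed': {
        'stream_id': 'stream-0001',
        'routing_key': 'stream-0001.data',
        'exchange_point': 'science_data',
        'stream_def_dict': {
            'parameter_dictionary': {},   # serialized ParameterDictionary contents
        },
    },
}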
Example #5
    def save_doc_mult(self, docs, object_ids=None, datastore_name=None):
        if type(docs) is not list:
            raise BadRequest("Invalid type for docs: %s" % type(docs))
        if not docs:
            return []

        if object_ids:
            for doc, oid in zip(docs, object_ids):
                doc["_id"] = oid
        else:
            for doc in docs:
                doc["_id"] = doc.get("_id", None) or self.get_unique_id()

        ds, datastore_name = self._get_datastore(datastore_name)
        res = self._save_doc_mult(ds, docs)

        self._count(create_mult_call=1, create_mult_obj=len(docs))
        if not all([success for success, oid, rev in res]):
            errors = ["%s:%s" % (oid, rev) for success, oid, rev in res if not success]
            log.error('create_doc_mult had errors. Successful: %s, Errors: %s'
                      % (len(res) - len(errors), "\n".join(errors)))
        else:
            log.debug('create_doc_mult successfully created %s documents', len(res))

        return res
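The error-collection logic above implies that _save_doc_mult returns (success, object_id, revision_or_error) tuples; that shape is an assumption here. A small standalone illustration of the same check:

# assumed result shape: (success, object_id, revision_or_error)
res = [(True, 'doc-1', '1-abc'), (False, 'doc-2', 'conflict'), (True, 'doc-3', '1-def')]
errors = ["%s:%s" % (oid, rev) for success, oid, rev in res if not success]
print(len(res) - len(errors), errors)   # 2 ['doc-2:conflict']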
Example #6
 def exception_callback(self, exception):
     """
     Callback passed to the driver which handles exceptions raised when
     in streaming mode.
     """
     log.error('Exception detected in the driver', exc_info=True)
     self._fsm.on_event(ResourceAgentEvent.LOST_CONNECTION)
Example #7
def render_service_gateway_response(service_gateway_resp, raw_return=None):
    log.trace('1 response: %r', service_gateway_resp)
    if service_gateway_resp.status_code == 200:
        resp = json.loads(service_gateway_resp.content)
        try:
            log.trace('2 loads: %r', resp)
            data = resp['data']
            log.trace('3 data: %r', data)
            response = data['GatewayResponse']
            log.trace('4 response: %r', response)
            if raw_return:
                log.trace('5 returning')
                return response
            elif isinstance(response, list):
                zero = response[0]
                log.trace('5 returning zero: %r', zero)
                return zero
            else:
                log.trace('5 returning')
                return response
        except Exception as e:
            log.error('exception extracting result: %s\nresponse: %r', e, resp, exc_info=True)
            data = resp['data']
            log.trace('3 data: %r', data)
            return data
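render_service_gateway_response expects the response body to carry a 'data' / 'GatewayResponse' envelope. A hypothetical payload matching the keys accessed above (the contents are invented for illustration):

import json

# hypothetical gateway body; real responses carry service-specific content
body = json.dumps({'data': {'GatewayResponse': [{'name': 'resource-1'}]}})
resp = json.loads(body)
print(resp['data']['GatewayResponse'][0])   # {'name': 'resource-1'}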
Example #8
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, 'dvr_egg')
            module_name = self._dvr_config['dvr_mod']
            class_name = self._dvr_config['dvr_cls']
            config = self._dvr_config['startup_config']
        except:
            log.error('error in configuration', exc_info=True)
            raise

        egg_name = None
        egg_repo = None
        memento = self._get_state(DSA_STATE_KEY)


        log.warn("Get driver object: %s, %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo, memento)
        if uri:
            egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
            egg_repo = uri[0:len(uri)-len(egg_name)-1] if uri.startswith('http') else None

        log.info("instantiate driver plugin %s.%s", module_name, class_name)
        params = [config, memento, self.publish_callback, self.persist_state_callback, self.exception_callback]
        return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
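The egg name/repo split above only applies to http URIs. A short standalone sketch of that split with a made-up URL:

# hypothetical egg URI; for non-http values the whole string is treated as the egg name
uri = 'http://example.com/eggs/driver-1.0-py2.7.egg'
egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
egg_repo = uri[0:len(uri) - len(egg_name) - 1] if uri.startswith('http') else None
print(egg_name)   # driver-1.0-py2.7.egg
print(egg_repo)   # http://example.com/eggs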
Example #9
def service_gateway_get(service_name, operation_name, params={}):
    resp = requests.get(build_get_request(service_name, operation_name, params))
    try:
        return render_service_gateway_response(resp)
    except:
        log.error('gateway error with response %r', resp, exc_info=True)
        raise
Example #10
 def publish_callback(self, particle):
     for p in particle:
         try:
             log.info("Particle received: %s", p.generate())
             self._async_driver_event_sample(p.generate(), None)
         except:
             log.error("Error logging particle", exc_info=True)
Example #11
    def _find_top_site_device(self, deployment_id):
        top_site = ''
        top_device = ''
        # retrieve the site tree information using the OUTIL functions; the site info also includes the site children
        deploy_items_objs, _ = self.clients.resource_registry.find_subjects(
            predicate=PRED.hasDeployment, object=deployment_id, id_only=False)
        log.debug("site_ids associated to this deployment: %s",
                  deploy_items_objs)
        for obj in deploy_items_objs:
            rsrc_type = obj.type_
            log.debug("resource type associated to this deployment:: %s",
                      rsrc_type)
            if RT.PlatformDevice == rsrc_type or RT.InstrumentDevice == rsrc_type:
                top_device = obj
            elif RT.PlatformSite == rsrc_type or RT.InstrumentSite == rsrc_type:
                top_site = obj
            else:
                log.error(
                    'Deployment may only link to devices and sites. Deployment: %s',
                    str(self.deployment_obj))

        if not top_device or not top_site:
            log.error(
                'Deployment must associate to both site and device. Deployment: %s',
                str(self.deployment_obj))
            raise BadRequest(
                'Deployment must associate to both site and device. Deployment: %s'
                % str(self.deployment_obj))

        return top_site, top_device
Example #12
        def recv_evt_messages(driver_client):
            """
            A looping function that monitors a ZMQ SUB socket for asynchronous
            driver events. Can be run as a thread or greenlet.
            @param driver_client The client object that launches the thread.
            """
            context = zmq.Context()
            sock = context.socket(zmq.SUB)
            sock.connect(driver_client.event_host_string)
            sock.setsockopt(zmq.SUBSCRIBE, '')
            log.info('Driver client event thread connected to %s.' %
                  driver_client.event_host_string)

            driver_client.stop_event_thread = False
            #last_time = time.time()
            while not driver_client.stop_event_thread:
                try:
                    evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                    log.debug('got event: %s' % str(evt))
                    if driver_client.evt_callback:
                        driver_client.evt_callback(evt)
                except zmq.ZMQError:
                    time.sleep(.5)
                except Exception as e:
                    log.error('Driver client error reading from zmq event socket: ' + str(e))
                    log.error('Driver client error type: ' + str(type(e)))                    
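The SUB loop above pairs with a PUB socket in the driver process. A minimal sketch of that publishing side, assuming events are sent with send_pyobj; the port and the event contents are hypothetical:

import zmq

context = zmq.Context()
pub = context.socket(zmq.PUB)
pub.bind('tcp://*:5600')                            # hypothetical event port
pub.send_pyobj({'type': 'SAMPLE', 'value': None})   # one asynchronous driver event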
Example #13
    def construct_protocol(self, proto_module):
        module = importlib.import_module(proto_module)
        if hasattr(module, 'create_playback_protocol'):
            return module.create_playback_protocol(self.handle_event)

        log.error('Unable to import and create playback protocol from module: %r', module)
        sys.exit(1)
Example #14
    def _process_packet(self):
        chunk = self._filehandle.read(1024)
        if chunk != '':
            self.buffer += chunk
            new_index = 0
            for match in self.ooi_ts_regex.finditer(self.buffer):
                payload = match.group(2)
                try:
                    packet_time = string_to_ntp_date_time(match.group(1))
                    header = PacketHeader(
                        packet_type=PacketType.FROM_INSTRUMENT,
                        payload_size=len(payload),
                        packet_time=packet_time)
                    header.set_checksum(payload)
                    packet = PlaybackPacket(payload=payload, header=header)
                    self.callback(packet)
                except ValueError:
                    log.error('Unable to extract timestamp from record: %r' %
                              match.group())
                new_index = match.end()

            if new_index > 0:
                self.buffer = self.buffer[new_index:]

            if len(self.buffer) > self.MAXBUF:
                self.buffer = self.buffer[-self.MAXBUF:]

            return True

        return False
Example #15
 def publish_callback(self, particle):
     for p in particle:
         try:
             log.info("Particle received: %s", p.generate())
             self._async_driver_event_sample(p.generate(), None)
         except:
             log.error("Error logging particle", exc_info=True)
Example #16
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, 'dvr_egg')
            module_name = self._dvr_config['dvr_mod']
            class_name = self._dvr_config['dvr_cls']
            config = self._dvr_config['startup_config']
        except:
            log.error('error in configuration', exc_info=True)
            raise

        egg_name = None
        egg_repo = None
        memento = self._get_state(DSA_STATE_KEY)

        log.warn("Get driver object: %s, %s, %s, %s", class_name, module_name,
                 egg_name, egg_repo)
        if uri:
            egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
            egg_repo = uri[0:len(uri) - len(egg_name) -
                           1] if uri.startswith('http') else None

        log.info("instantiate driver plugin %s.%s", module_name, class_name)
        params = [
            config, memento, self.publish_callback,
            self.persist_state_callback, self.exception_callback
        ]
        return EGG_CACHE.get_object(class_name, module_name, egg_name,
                                    egg_repo, params)
Example #17
    def _validate_port_assignments(self, device_id, platform_port):
        deployment_context_type = type(self.deployment_obj.context).__name__

        self._validate_ooi_reference_designator(device_id, platform_port)

        # a one-to-one deployment of a device onto an RSN platform
        if OT.CabledInstrumentDeploymentContext == deployment_context_type or \
            OT.CabledNodeDeploymentContext == deployment_context_type:

            # validate IP address for a cabled node deployment
            from socket import inet_aton
            try:
                inet_aton(platform_port.ip_address)
            except:
                log.error('IP address validation failed for device. Device id: %s', device_id)

        # validate port_type based on deployment context
        # a platform device deployment should have UPLINK port type
        if OT.RemotePlatformDeploymentContext == deployment_context_type or \
            OT.CabledNodeDeploymentContext == deployment_context_type:
            if device_id in self.device_resources and self.device_resources[device_id].type_ is RT.PlatformDevice:
                if platform_port.port_type != PortTypeEnum.UPLINK:
                    log.warning('Type of port for platform port assignment should be UPLINK.  Device id: %s', device_id)

        #validate that parent_id is provided
        if not platform_port.parent_id:
            log.warning('Id of parent device should be provided in port assignment information. Device id: %s', device_id)
Example #18
    def launch(self):
        """
        Launches the simulator process as indicated by _COMMAND.

        @return (rsn_oms, uri) A pair with the CIOMSSimulator instance and the
                associated URI to establish connection with it.
        """
        log.debug("[OMSim] Launching: %s", _COMMAND)

        self._process = self._spawn(_COMMAND)

        if not self._process or not self.poll():
            msg = "[OMSim] Failed to launch simulator: %s" % _COMMAND
            log.error(msg)
            raise Exception(msg)

        log.debug("[OMSim] process started, pid: %s", self.getpid())

        # give it some time to start up
        sleep(5)

        # get URI:
        uri = None
        with open("logs/rsn_oms_simulator.yml", buffering=1) as f:
            # we expect one of the first few lines to be of the form:
            # rsn_oms_simulator_uri=xxxx
            # where xxxx is the uri -- see oms_simulator_server.
            while uri is None:
                line = f.readline()
                if line.startswith("rsn_oms_simulator_uri="):
                    uri = line[len("rsn_oms_simulator_uri="):].strip()

        self._rsn_oms = CIOMSClientFactory.create_instance(uri)
        return self._rsn_oms, uri
Example #19
        def recv_evt_messages(driver_client):
            """
            A looping function that monitors a ZMQ SUB socket for asynchronous
            driver events. Can be run as a thread or greenlet.
            @param driver_client The client object that launches the thread.
            """
            context = zmq.Context()
            sock = context.socket(zmq.SUB)
            sock.connect(driver_client.event_host_string)
            sock.setsockopt(zmq.SUBSCRIBE, '')
            log.info('Driver client event thread connected to %s.' %
                     driver_client.event_host_string)

            driver_client.stop_event_thread = False
            #last_time = time.time()
            while not driver_client.stop_event_thread:
                try:
                    evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                    log.debug('got event: %s' % str(evt))
                    if driver_client.evt_callback:
                        driver_client.evt_callback(evt)
                except zmq.ZMQError:
                    time.sleep(.5)
                except Exception as e:
                    log.error(
                        'Driver client error reading from zmq event socket: ' +
                        str(e))
                    log.error('Driver client error type: ' + str(type(e)))
Example #20
 def invoke_action(self, component_id, request):
     log.debug(component_id + ' performing: ' + str(request))
     component = self.components[component_id]
     try:
         component.perform_action(request)
     except Exception as ex:
         log.error('request failed: ' + str(request) + ' ' + str(ex), exc_info=True)
Example #21
    def _create_bucket(self, name, auth_type='sasl', bucket_type='couchbase',
                       parallel_db_and_view_compaction=False,
                       ram_quota_mb="128", replica_index='0', replica_number='0',
                       sasl_password=None, flush_enabled=False, proxy_port=11211):
        """
        If you set authType to "None", then you must specify a proxyPort number.
        If you set authType to "sasl", then you may optionally provide a "saslPassword" parameter.
           For Couchbase Server 1.6.0, any SASL authentication-based access must go through a proxy at port 11211.
        """
        payload = dict()
        payload['name'] = name
        payload['authType'] = auth_type
        payload['bucketType'] = bucket_type
        if flush_enabled:
            payload['flushEnabled'] = '1'
        if parallel_db_and_view_compaction:
            payload['parallelDBAndViewCompaction'] = parallel_db_and_view_compaction
            payload['proxyPort'] = proxy_port
        payload['ramQuotaMB'] = ram_quota_mb
        if replica_index:
            payload['replicaIndex'] = replica_index
        if replica_number:
            payload['replicaNumber'] = replica_number
        if sasl_password:
            payload['saslPassword'] = sasl_password

        response = requests.post('http://%s:%s/pools/default/buckets' % (self.host, self.port), auth=(self.username, self.password), data=payload)
        if response.status_code != 202:
            log.error('Unable to create bucket %s on %s' % (name, self.host))
            raise BadRequest('Couchbase error %d: %s' % (response.status_code, response.content))

        gevent.sleep(2)
Example #22
    def _process_packet(self):
        chunk = self._filehandle.read(1024)
        if chunk != '':
            self.buffer += chunk
            new_index = 0
            for match in self.ooi_ts_regex.finditer(self.buffer):
                payload = match.group(2)
                try:
                    packet_time = string_to_ntp_date_time(match.group(1))
                    header = PacketHeader(packet_type=PacketType.FROM_INSTRUMENT,
                                          payload_size=len(payload), packet_time=packet_time)
                    header.set_checksum(payload)
                    packet = PlaybackPacket(payload=payload, header=header)
                    self.callback(packet)
                except ValueError:
                    log.error('Unable to extract timestamp from record: %r' % match.group())
                new_index = match.end()

            if new_index > 0:
                self.buffer = self.buffer[new_index:]

            if len(self.buffer) > self.MAXBUF:
                self.buffer = self.buffer[-self.MAXBUF:]

            return True

        return False
Example #23
    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a driver by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The driver command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.
        msg = {'cmd':cmd,'args':args,'kwargs':kwargs}
        
        log.debug('Sending command %s.' % str(msg))
        while True:
            try:
                # Attempt command send. Retry if necessary.
                self.zmq_cmd_socket.send_pyobj(msg)
                if cmd == 'stop_driver_process':
                    return 'driver stopping'

                # Command sent, break out and wait for reply.
                break    

            except zmq.ZMQError:
                # Socket not ready to accept send. Sleep and retry later.
                time.sleep(.5)

            except Exception as e:
                log.error('Driver client error writing to zmq socket: ' + str(e))
                raise SystemError('exception writing to zmq socket: ' + str(e))
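cmd_dvr sends a {'cmd', 'args', 'kwargs'} dict and then blocks on the same socket for the reply. A minimal sketch of a reply side that would satisfy it, with a hypothetical port:

import zmq

context = zmq.Context()
rep = context.socket(zmq.REP)
rep.bind('tcp://*:5556')                   # hypothetical command port
msg = rep.recv_pyobj()                     # {'cmd': ..., 'args': ..., 'kwargs': ...}
rep.send_pyobj('handled %s' % msg['cmd'])  # reply read by the blocking client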
Example #24
    def _validate_port_assignments(self, device_id, platform_port):
        deployment_context_type = type(self.deployment_obj.context).__name__

        self._validate_ooi_reference_designator(device_id, platform_port)

        # a one-to-one deployment of a device onto an RSN platform
        if OT.CabledInstrumentDeploymentContext == deployment_context_type or \
            OT.CabledNodeDeploymentContext == deployment_context_type:

            # validate IP address for a cabled node deployment
            from socket import inet_aton
            try:
                inet_aton(platform_port.ip_address)
            except:
                log.error('IP address validation failed for device. Device id: %s', device_id)

        # validate port_type based on deployment context
        # a platform device deployment should have UPLINK port type
        if OT.RemotePlatformDeploymentContext == deployment_context_type or \
            OT.CabledNodeDeploymentContext == deployment_context_type:
            if device_id in self.device_resources and self.device_resources[device_id].type_ is RT.PlatformDevice:
                if platform_port.port_type != PortTypeEnum.UPLINK:
                    log.warning('Type of port for platform port assignment should be UPLINK.  Device id: %s', device_id)

        #validate that parent_id is provided
        if not platform_port.parent_id:
            log.warning('Id of parent device should be provided in port assignment information. Device id: %s', device_id)
Example #25
    def set_configuration(self, config):
        log.warn("DRIVER: set_configuration")
        """
        expect configuration to have:
        - parser module/class
        - directory, wildcard to find data files
        - optional timestamp of last granule
        - optional poll rate
        - publish info
        """
        log.error("Log level: %s", log.getEffectiveLevel())
        log.debug('using configuration: %s', config)
        self.config = config
        self.max_records = get_safe(config, 'max_records', 100)
        self.stream_config = self.CFG.get('stream_config', {})
        # one or more streams may be configured; either way, use the first entry
        if self.stream_config:
            stream_cfg = list(self.stream_config.values())[0]

        stream_id = stream_cfg['stream_id']
        stream_route = IonObject(OT.StreamRoute,
                                 routing_key=stream_cfg['routing_key'],
                                 exchange_point=stream_cfg['exchange_point'])
        param_dict = stream_cfg['stream_def_dict']['parameter_dictionary']
        self.publisher = StandaloneStreamPublisher(stream_id=stream_id,
                                                   stream_route=stream_route)
        self.parameter_dictionary = ParameterDictionary.load(param_dict)
        self.time_field = self.parameter_dictionary.get_temporal_context()
        self.latest_granule_time = get_safe(config, 'last_time', 0)
Example #26
    def publish_callback(self, particle):
        """
        Publish particles to the agent.

        TODO: currently we are generating JSON-serialized objects;
        we should be able to send the objects themselves because we don't
        have the zmq boundary issue in this client.

        @return: number of records published
        """
        publish_count = 0
        try:
            for p in particle:
                # Can we use p.generate_dict() here?
                p_obj = p.generate()
                log.debug("Particle received: %s", p_obj)
                self._async_driver_event_sample(p_obj, None)
                publish_count += 1
        except Exception as e:
            log.error("Error logging particle: %s", e, exc_info=True)

            # Reset the connection id because we can not ensure contiguous
            # data.
            self._asp.reset_connection()

            log.debug("Publish ResourceAgentErrorEvent from publisher_callback")
            self._event_publisher.publish_event(
                error_msg = "Sample Parsing Exception: %s" % e,
                event_type='ResourceAgentErrorEvent',
                origin_type=self.ORIGIN_TYPE,
                origin=self.resource_id
            )

        return publish_count
Example #27
    def _build_parsed_values(self):
        """
        Take the hardware config data and parse it into
        values with appropriate tags.
        """
        try:
            unpack_string = '<4s14s2s4H2s12s4sh2s'
            (sync, serial_num, config, board_frequency, pic_version,
             hw_revision, recorder_size, status, spare, fw_version, cksum,
             _) = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<23H', self.raw_data, -4):
                log.warn(
                    "_parse_read_hw_config: Bad read hw response from instrument (%r)",
                    self.raw_data)
                self.contents[DataParticleKey.
                              QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            config = common.convert_word_to_bit_field(config)
            status = common.convert_word_to_bit_field(status)
            recorder_installed = config[-1]
            compass_installed = config[-2]
            velocity_range = status[-1]

        except Exception:
            log.error('Error creating particle hardware config, raw data: %r',
                      self.raw_data)
            raise SampleException

        result = [{
            VID: NortekHardwareConfigDataParticleKey.SERIAL_NUM,
            VAL: serial_num
        }, {
            VID: NortekHardwareConfigDataParticleKey.RECORDER_INSTALLED,
            VAL: recorder_installed
        }, {
            VID: NortekHardwareConfigDataParticleKey.COMPASS_INSTALLED,
            VAL: compass_installed
        }, {
            VID: NortekHardwareConfigDataParticleKey.BOARD_FREQUENCY,
            VAL: board_frequency
        }, {
            VID: NortekHardwareConfigDataParticleKey.PIC_VERSION,
            VAL: pic_version
        }, {
            VID: NortekHardwareConfigDataParticleKey.HW_REVISION,
            VAL: hw_revision
        }, {
            VID: NortekHardwareConfigDataParticleKey.RECORDER_SIZE,
            VAL: recorder_size
        }, {
            VID: NortekHardwareConfigDataParticleKey.VELOCITY_RANGE,
            VAL: velocity_range
        }, {
            VID: NortekHardwareConfigDataParticleKey.FW_VERSION,
            VAL: fw_version
        }]

        log.debug('NortekHardwareConfigDataParticle: particle=%r', result)
        return result
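common.convert_word_to_bit_field is not shown here; the indexing above (config[-1], config[-2]) suggests it returns a bit list whose last element is the least-significant bit. A hypothetical equivalent, for illustration only:

def convert_word_to_bit_field(word):
    # hypothetical stand-in: most-significant bit first, so [-1] is bit 0
    return [(word >> shift) & 1 for shift in range(15, -1, -1)]

config = convert_word_to_bit_field(0x0003)
print(config[-1], config[-2])   # 1 1 -> recorder installed, compass installed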
Example #28
 def _run(self):
     try:
         args = self.srcname, self.select, self.reject
         # TODO Review this queue size
         # TODO Review reasoning behind using OrbreapThr vs. normal ORB API
         # I think it had something to do with orb.reap() blocking forever
         # on comms failures; maybe we could create our own orbreapthr
         # implementation?
         with OrbreapThr(*args, timeout=1, queuesize=10000) as orbreapthr:
             log.info("Connected to ORB %s %s %s" % (self.srcname, self.select,
                                                     self.reject, self.after))
             threadpool = ThreadPool(maxsize=1)
             try:
                 while True:
                     try:
                         success, value = threadpool.spawn(
                                 wrap_errors, (Exception,), orbreapthr.get, [], {}).get()
                         timestamp = ntp.now()
                         if not success:
                             raise value
                      except (Timeout, NoData) as e:
                          log.debug("orbreapthr.get exception %r" % type(e))
                     else:
                         if value is None:
                             raise Exception('Nothing to publish')
                         self._publish(value, timestamp)
             finally:
                 # This blocks until all threads in the pool return. That's
                 # critical; if the orbreapthr dies before the get thread,
                 # segfaults ensue.
                 threadpool.kill()
      except Exception:
         log.error("OrbPktSrc terminating due to exception", exc_info=True)
         raise
Example #29
    def _load(self):
        try:
            results = DBFactory.get_db().get(self.guid)
            for key in results:
                val = results[key]
                if isinstance(val, basestring) and val.startswith('DICTABLE'):
                    i = val.index('|', 9)
                    smod, sclass = val[9:i].split(':')
                    value = unpack(val[i+1:])
                    module = __import__(smod, fromlist=[sclass])
                    classobj = getattr(module, sclass)
                    value = classobj._fromdict(value)
                elif key in ('root_dir', 'file_path'):
                    # No op - set in constructor
                    continue
                elif key == 'brick_tree':
                    setattr(self, key, RTreeProxy.deserialize(val))
                    continue
                elif key == 'span_collection':
                    unpacked = unpack(val)
                    value = SpanCollectionByFile.from_str(unpacked)
                    log.trace("Reconstructed SpanCollection for %s: %s", self.guid, str(value))
                else:
                    value = unpack(val)

                if isinstance(value, tuple):
                    value = list(value)

                setattr(self, key, value)

        except Exception as e:
            log.error("Caught exception reconstructing metadata for guid %s : %s", self.guid, e.message)
            raise
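The 'DICTABLE' branch above assumes values of the form 'DICTABLE|<module>:<class>|<packed payload>'. A small sketch of that slicing with an invented value:

# hypothetical serialized value; the prefix 'DICTABLE|' is 9 characters long
val = 'DICTABLE|mypkg.types:MyType|<packed-dict-payload>'
i = val.index('|', 9)
smod, sclass = val[9:i].split(':')
payload = val[i + 1:]
print(smod, sclass, payload)   # mypkg.types MyType <packed-dict-payload>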
Example #30
 def run(self):
     log.debug(self.agent.__class__.__name__ + " <" + self.agent.agent_id + "> running")
     try:
         self.agent.manage()
     except Exception:
         log.error("agent exception: " + format_exc())
     else:
         log.debug("agent startup thread complete")
Example #31
    def _build_parsed_values(self):
        """
        Take the velocity header data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorVelocityHeaderDataParticle: raw data =%r',
                  self.raw_data)

        try:
            unpack_string = '<4s6sH8B20sH'
            sync, timestamp, number_of_records, noise1, noise2, noise3, _, correlation1, correlation2, correlation3, _,\
                _, cksum = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<20H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)",
                         self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.
                              QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(timestamp)
            self.set_internal_timestamp(
                (timestamp - datetime(1900, 1, 1)).total_seconds())

        except Exception as e:
            log.error(
                'Error creating particle vel3d_cd_data_header, raw data: %r',
                self.raw_data)
            raise SampleException(e)

        result = [{
            VID: VectorVelocityHeaderDataParticleKey.TIMESTAMP,
            VAL: str(timestamp)
        }, {
            VID: VectorVelocityHeaderDataParticleKey.NUMBER_OF_RECORDS,
            VAL: number_of_records
        }, {
            VID: VectorVelocityHeaderDataParticleKey.NOISE1,
            VAL: noise1
        }, {
            VID: VectorVelocityHeaderDataParticleKey.NOISE2,
            VAL: noise2
        }, {
            VID: VectorVelocityHeaderDataParticleKey.NOISE3,
            VAL: noise3
        }, {
            VID: VectorVelocityHeaderDataParticleKey.CORRELATION1,
            VAL: correlation1
        }, {
            VID: VectorVelocityHeaderDataParticleKey.CORRELATION2,
            VAL: correlation2
        }, {
            VID: VectorVelocityHeaderDataParticleKey.CORRELATION3,
            VAL: correlation3
        }]

        log.debug('VectorVelocityHeaderDataParticle: particle=%s', result)
        return result
Example #32
 def _start_driver(self, dvr_config):
     log.warn("DRIVER: _start_driver: %s", dvr_config)
     try:
         self.set_configuration(dvr_config)
     except:
         log.error('error in configuration', exc_info=True)
         raise
     self._dvr_client = self
     self._asp.reset_connection()
Example #33
 def __init__(self, directory, wildcard, callback, exception_callback=None, interval=1):
     try:
         if not os.path.isdir(directory):
             raise ValueError('%s is not a directory'%directory)
         self._path = directory + '/' + wildcard
         self._last_filename = None
         super(DirectoryPoller,self).__init__(self._check_for_files, callback, exception_callback, interval)
     except:
         log.error('failed init?', exc_info=True)
Example #34
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        new_flst = get_safe(config, 'constraints.new_files', [])
        parser_mod = get_safe(config, 'parser_mod', '')
        parser_cls = get_safe(config, 'parser_cls', '')

        module = __import__(parser_mod, fromlist=[parser_cls])
        classobj = getattr(module, parser_cls)

        for f in new_flst:
            try:
                size = os.stat(f[0]).st_size
                try:
                    #find the new data check index in config
                    index = -1
                    for ndc in config['set_new_data_check']:
                        if ndc[0] == f[0]:
                            index = config['set_new_data_check'].index(ndc)
                            break
                except:
                    log.error('File name not found in attachment')

                parser = classobj(f[0], f[3])

                max_rec = get_safe(config, 'max_records', 1)
                stream_def = get_safe(config, 'stream_def')
                while True:
                    particles = parser.get_records(max_count=max_rec)
                    if not particles:
                        break

                    rdt = RecordDictionaryTool(stream_definition_id=stream_def)

                    populate_rdt(rdt, particles)

                    g = rdt.to_granule()

                    # TODO: record files already read for future additions...
                    # update new data check with the latest file position
                    if 'set_new_data_check' in config and index > -1:
                        # WRONG: should only record this after file finished parsing,
                        # but may not have another yield at that point to trigger update
                        config['set_new_data_check'][index] = (f[0], f[1],
                                                               f[2], size)

                    yield g

#                parser.close()

            except Exception as ex:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error('Error parsing data file \'{0}\': {1}'.format(f, ex))
Example #35
    def construct_protocol(self, proto_module):
        module = importlib.import_module(proto_module)
        if hasattr(module, 'create_playback_protocol'):
            return module.create_playback_protocol(self.handle_event)

        log.error(
            'Unable to import and create playback protocol from module: %r',
            module)
        sys.exit(1)
Example #36
 def _start_driver(self, dvr_config):
     log.warn("DRIVER: _start_driver: %s", dvr_config)
     try:
         self.set_configuration(dvr_config)
     except:
         log.error('error in configuration', exc_info=True)
         raise
     self._dvr_client = self
     self._asp.reset_connection()
Example #37
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        new_flst = get_safe(config, "constraints.new_files", [])
        parser_mod = get_safe(config, "parser_mod", "")
        parser_cls = get_safe(config, "parser_cls", "")

        module = __import__(parser_mod, fromlist=[parser_cls])
        classobj = getattr(module, parser_cls)

        for f in new_flst:
            try:
                size = os.stat(f[0]).st_size
                try:
                    # find the new data check index in config
                    index = -1
                    for ndc in config["set_new_data_check"]:
                        if ndc[0] == f[0]:
                            index = config["set_new_data_check"].index(ndc)
                            break
                except:
                    log.error("File name not found in attachment")

                parser = classobj(f[0], f[3])

                max_rec = get_safe(config, "max_records", 1)
                stream_def = get_safe(config, "stream_def")
                while True:
                    particles = parser.get_records(max_count=max_rec)
                    if not particles:
                        break

                    rdt = RecordDictionaryTool(stream_definition_id=stream_def)

                    populate_rdt(rdt, particles)

                    g = rdt.to_granule()

                    # TODO: record files already read for future additions...
                    # update new data check with the latest file position
                    if "set_new_data_check" in config and index > -1:
                        # WRONG: should only record this after file finished parsing,
                        # but may not have another yield at that point to trigger update
                        config["set_new_data_check"][index] = (f[0], f[1], f[2], size)

                    yield g

            #                parser.close()

            except Exception as ex:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error("Error parsing data file '{0}': {1}".format(f, ex))
Example #38
 def _spawn(self, spawnargs):
     """
     Launch a process using popen
      @param spawnargs a list of arguments for the Popen command line.
                       The first argument must be the path to a program;
                       additional arguments must be in subsequent list elements.
     @return subprocess.Popen object
     """
     log.error('spawnargs: %s', spawnargs)
     return subprocess.Popen(spawnargs, env=os.environ, close_fds=True)
Example #39
 def _validate_driver_config(self):
     out = True
     for key in 'stream_id', 'stream_route', 'poller', 'parser', 'parameter_dict':
         if key not in self._dvr_config:
             log.error('missing key: %s',key)
             out = False
     if get_safe(self._dvr_config, 'max_records', 100)<1:
         log.error('max_records=%d, must be at least 1 or unset (default 100)', self.max_records)
         out = False
     return out
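A hypothetical driver config that would pass the checks above; the key names come from the validation loop, the values are placeholders:

dvr_config = {
    'stream_id': 'stream-0001',
    'stream_route': 'stream-0001.data',
    'poller': {'module': 'example.poller', 'class': 'DirectoryPoller'},
    'parser': {'module': 'example.parser', 'class': 'ExampleParser'},
    'parameter_dict': 'example_param_dict',
    'max_records': 50,          # optional; must be >= 1 if present
}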
Example #40
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, "dvr_egg")
            module_name = self._dvr_config["dvr_mod"]
            class_name = self._dvr_config["dvr_cls"]
            config = self._dvr_config["startup_config"]
            config["resource_id"] = self.resource_id
        except:
            log.error("error in configuration", exc_info=True)
            raise

        egg_name = None
        egg_repo = None

        memento = self._get_state(DSA_STATE_KEY)

        if memento:
            # memento not empty, which is the case after restart. Just keep what we have.
            log.debug("Using process persistent state: %s", memento)
        else:
            # memento empty, which is the case after a fresh start. See if we got stuff in CFG

            # Set state based on CFG using prior process' state
            prior_state = self.CFG.get_safe("agent.prior_state")

            if prior_state:
                if isinstance(prior_state, dict):
                    if DSA_STATE_KEY in prior_state:
                        memento = prior_state[DSA_STATE_KEY]
                        log.debug("Using persistent state from prior agent run: %s", memento)
                        self.persist_state_callback(memento)
                else:
                    raise InstrumentStateException("agent.prior_state invalid: %s" % prior_state)

        log.debug("Get driver object: %s, %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo, memento)
        if uri:
            egg_name = uri.split("/")[-1] if uri.startswith("http") else uri
            egg_repo = uri[0 : len(uri) - len(egg_name) - 1] if uri.startswith("http") else None

        log.debug("instantiate driver plugin %s.%s", module_name, class_name)
        params = [
            config,
            memento,
            self.publish_callback,
            self.persist_state_callback,
            self.event_callback,
            self.exception_callback,
        ]
        return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
Example #41
    def _start_driver(self, dvr_config):
        log.warn("DRIVER: _start_driver: %s", dvr_config)
        self._dvr_client = self._create_driver_plugin()

        if self._dvr_client is None:
            log.error("Failed to instantiate driver plugin!")
            raise InstrumentStateException('failed to start driver')

        log.warn("driver client created")

        self._asp.reset_connection()
Example #42
    def _start_driver(self, dvr_config):
        log.warn("DRIVER: _start_driver: %s", dvr_config)
        self._dvr_client = self._create_driver_plugin()

        if self._dvr_client is None:
            log.error("Failed to instantiate driver plugin!")
            raise InstrumentStateException('failed to start driver')

        log.warn("driver client created")

        self._asp.reset_connection()
Example #43
 def jsonify(self, events):
     try:
         return json.dumps(events)
     except UnicodeDecodeError as e:
         temp = []
         for each in events:
             try:
                 json.dumps(each)
                 temp.append(each)
             except UnicodeDecodeError as e:
                 log.error('Unable to encode event as JSON: %r', e)
         return json.dumps(temp)
Example #44
 def jsonify(self, events):
     try:
         return json.dumps(events)
     except UnicodeDecodeError as e:
         temp = []
         for each in events:
             try:
                 json.dumps(each)
                 temp.append(each)
             except UnicodeDecodeError as e:
                 log.error('Unable to encode event as JSON: %r', e)
         return json.dumps(temp)
Example #45
    def _handler_get_resource_capabilities(self, *args, **kwargs):
        """
        """
        next_state = None
        result = None

        try:
            next_state = None
            result = self._dvr_client.cmd_dvr('get_resource_capabilities', *args, **kwargs)
        except Exception as e:
            log.error("get_capabilities exception: %s", e)

        return (next_state, result)
Example #46
 def connect(self):
     delay = 1
     max_delay = 60
     while True:
         try:
             connection = qm.Connection(self.url, reconnect=False, username=self.username, password=self.password)
             connection.open()
             session = connection.session()
             self.sender = session.sender('%s; {create: always, node: {type: queue, durable: true}}' % self.queue)
             log.info('Shovel connected to QPID')
             return
         except qm.ConnectError:
             log.error('Shovel QPID connection error. Sleep %d seconds', delay)
             time.sleep(delay)
             delay = min(max_delay, delay*2)
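The reconnect loop above doubles the delay up to a 60-second cap. A tiny standalone illustration of the resulting backoff sequence:

delay, max_delay, delays = 1, 60, []
for _ in range(8):
    delays.append(delay)
    delay = min(max_delay, delay * 2)
print(delays)   # [1, 2, 4, 8, 16, 32, 60, 60]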
Example #47
    def load_url(cls, url, contexts, timeout=5):
        '''
        Load a parameter definition plugin using a URL
        '''

        # Use gevent instead of requests' socket-based timeout.
        with gevent.timeout.Timeout(timeout):
            r = requests.get(url)
        if r.status_code != 200:
            log.error('Failed to load module from %s', url)
            return []
        code = r.text
        if 'context_url' in sys.modules: sys.modules.pop('context_url')
        mixin_module = imp.new_module('context_url')
        exec(code, mixin_module.__dict__)
        return cls.load_module(mixin_module, contexts)
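load_url builds a throwaway module from downloaded source. A Python 3 analogue of that trick using types.ModuleType, with an invented code string:

import types

code = "def contexts():\n    return ['pressure', 'temperature']\n"   # hypothetical plugin source
mixin_module = types.ModuleType('context_url')
exec(code, mixin_module.__dict__)
print(mixin_module.contexts())   # ['pressure', 'temperature']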
Example #48
 def __init__(self,
              directory,
              wildcard,
              callback,
              exception_callback=None,
              interval=1):
     try:
         if not os.path.isdir(directory):
             raise ValueError('%s is not a directory' % directory)
         self._path = directory + '/' + wildcard
         self._last_filename = None
         super(DirectoryPoller,
               self).__init__(self._check_for_files, callback,
                              exception_callback, interval)
     except:
         log.error('failed init?', exc_info=True)
Example #49
        def run(self):
            self.running = True

            while self.running and not self.registered:
                try:
                    ConsulServiceRegistry.register_driver(
                        self.reference_designator, self.port)
                    self.registered = True
                except ConnectionError:
                    log.error(
                        'Unable to register with Consul, will attempt again in %d secs',
                        DRIVER_SERVICE_TTL / 2)
                    time.sleep(DRIVER_SERVICE_TTL / 2)

            while self.running:
                CONSUL.agent.check.ttl_pass(self.check_id)
                time.sleep(DRIVER_SERVICE_TTL / 2)
Example #50
 def construct_driver(self):
     """
     Attempt to import and construct the driver object based on
     configuration.
     @retval True if successful, False otherwise.
     """
     try:
         module = importlib.import_module(self.driver_module)
         driver_class = getattr(module, self.driver_class)
         self.driver = driver_class(self.send_event)
         log.info('Imported and created driver from module: %r class: %r driver: %r',
                  module, driver_class, self.driver)
         return True
     except Exception as e:
         log.error('Could not import/construct driver module %s, class %s.',
                   self.driver_module, self.driver_class)
         log.error('%s' % str(e))
         return False
Example #51
 def recv_evt_messages():
     """
     A looping function that monitors a ZMQ SUB socket for asynchronous
     driver events. Can be run as a thread or greenlet.
     @param driver_client The client object that launches the thread.
     """
     self.stop_event_thread = False
     while not self.stop_event_thread:
         try:
             evt = self.zmq_evt_socket.recv_pyobj(flags=zmq.NOBLOCK)
             log.debug('got event: %s' % str(evt))
             if self.evt_callback:
                 self.evt_callback(evt)
         except zmq.ZMQError:
             time.sleep(.5)
          except Exception as e:
             log.error('Driver client error reading from zmq event socket: ' + str(e))
             log.error('Driver client error type: ' + str(type(e)))                    
Example #52
    def handle_event(self, event_type, val=None):
        """
        Construct and send an asynchronous driver event.
        @param event_type a DriverAsyncEvent type specifier.
        @param val event value for sample and test result events.
        """
        event = {'type': event_type, 'value': val, 'time': time.time()}

        if isinstance(event[EventKeys.VALUE], Exception):
            event[EventKeys.VALUE] = encode_exception(event[EventKeys.VALUE])

        if event[EventKeys.TYPE] == DriverAsyncEvent.ERROR:
            log.error(event)

        if event[EventKeys.TYPE] == DriverAsyncEvent.SAMPLE:
            if event[EventKeys.VALUE].get('stream_name') != 'raw':
                # don't publish raw
                self.particle_publisher.enqueue(event)
        else:
            self.event_publisher.enqueue(event)
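handle_event builds events as plain dicts; the EventKeys constants used above are assumed here to map onto the same literal keys. A minimal sketch under that assumption:

import time

class EventKeys:
    # assumed mapping; the real constants are defined elsewhere in the driver framework
    TYPE = 'type'
    VALUE = 'value'
    TIME = 'time'

event = {'type': 'DRIVER_ASYNC_EVENT_SAMPLE',
         'value': {'stream_name': 'parsed'},
         'time': time.time()}
print(event[EventKeys.TYPE], event[EventKeys.VALUE].get('stream_name'))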
Example #53
    def _publish(self, events, headers):
        msg_headers = self._merge_headers(headers)

        now = time.time()
        try:
            publish = self.connection.ensure(self.producer,
                                             self.producer.publish,
                                             max_retries=4)
            publish(json.dumps(events),
                    headers=msg_headers,
                    user_id=self.username,
                    declare=[self._queue],
                    content_type='text/plain')
            log.info(
                'Published %d messages using KOMBU in %.2f secs with headers %r',
                len(events),
                time.time() - now, msg_headers)
        except Exception as e:
            log.error('Exception attempting to publish events: %r', e)
            return events
Example #54
    def _got_device_status_event(self, evt, *args, **kwargs):
        """
        Handles "device_added" and "device_removed" DeviceStatusEvents.
        """

        expected_subtypes = ("device_added", "device_removed",
                             "device_failed_command")

        with self._lock:
            if not self._active:
                log.warn(
                    "%r: _got_device_status_event called but "
                    "manager has been destroyed", self._platform_id)
                return

        # we are only interested in DeviceStatusEvent directly:
        # (note that also subclasses of DeviceStatusEvent will be notified here)
        if evt.type_ != "DeviceStatusEvent":
            log.trace(
                "%r: ignoring event type %r. Only handle DeviceStatusEvent directly.",
                self._platform_id, evt.type_)
            return

        sub_type = evt.sub_type

        log.debug("%r: _got_device_status_event: %s\n sub_type=%r",
                  self._platform_id, evt, evt.sub_type)

        if sub_type not in expected_subtypes:
            log.error(
                "StatusManager._got_device_status_event: Unexpected sub_type=%r. Expecting one of %r"
                % (sub_type, expected_subtypes))
            return

        with self._lock:
            if sub_type == "device_added":
                self._device_added_event(evt)
            elif sub_type == "device_removed":
                self._device_removed_event(evt)
            else:
                self.device_failed_command_event(evt)
Example #55
def process_oms_request():
    """
    This is the method that is called when the OMS POSTs OMS Events to
    this registered listener at the "/" path.
    :return:
    """

    if isinstance(request.json, list):
        # Log the list of Alert & Alarm messages from the OMS Event
        for alert_alarm_dict in request.json:
            aa_publisher.enqueue(alert_alarm_dict)
            log.info('oms_alert_alarm_server: OMS_AA_MSG: %r',
                     alert_alarm_dict)

        # Publish the list of Alert & Alarm messages to qpid
        aa_publisher.publish()

    else:
        log.error('No data in the POSTed alert/alarm OMS Event ...')

    return '', httplib.ACCEPTED
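process_oms_request reads request.json, which suggests a Flask-style listener. A minimal sketch of how such a route might be registered, with the publisher stubbed out:

from flask import Flask, request

app = Flask(__name__)

@app.route('/', methods=['POST'])
def oms_listener():
    # hypothetical stand-in for process_oms_request; the real code enqueues to a publisher
    if isinstance(request.json, list):
        for alert_alarm_dict in request.json:
            print('OMS_AA_MSG: %r' % alert_alarm_dict)
    return '', 202   # httplib.ACCEPTED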
Example #56
    def _validate_driver_config(self):
        """
        Verify the agent configuration contains a driver config. Called by the
        uninitialize_initialize handler in the IA class.
        """
        log.debug("Driver Config: %s", self._dvr_config)
        out = True

        for key in ('startup_config', 'dvr_mod', 'dvr_cls'):
            if key not in self._dvr_config:
                log.error('missing key: %s', key)
                out = False

        for key in ('stream_config', ):
            if key not in self.CFG:
                log.error('missing key: %s', key)
                out = False

        if get_safe(self._dvr_config, 'max_records', 100) < 1:
            log.error(
                'max_records=%d, must be at least 1 or unset (default 100)',
                self.max_records)
            out = False

        return out
Example #57
 def poller_callback(self, file_like_object, state_memento):
     log.debug('poller found data to parse')
     try:
         config = self.config['parser']
         parser = self._create_plugin(
             config,
             kwargs=dict(open_file=file_like_object,
                         parse_after=self.latest_granule_time))
         records = parser.get_records(max_count=self.max_records)
         log.trace('have %d records', len(records))
         while records:
             self._asp.on_sample_mult(records)
             # # secretly uses pubsub client
             # rdt = RecordDictionaryTool(param_dictionary=self.parameter_dictionary)
             # for key in records[0]: #assume all dict records have same keys
             #     rdt[key] = [ record[key] for record in records ]
             # g = rdt.to_granule()
             # self.publisher.publish(g)
             records = parser.get_records(max_count=self.max_records)
         self._set_state('poller_state', state_memento)
     except Exception as ex:
         log.error('error handling data', exc_info=True)