Example No. 1
    def terminate_process(self, process_id, do_notifications=True):
        """
        Terminates a process and all its resources. Termination is graceful with timeout.

        @param  process_id          The id of the process to terminate. Should exist in the container's
                                    list of processes or this will raise.
        @param  do_notifications    If True, emits process state changes for TERMINATING and TERMINATED.
                                    If False, suppresses any state change notifications. Used when the
                                    process is already EXITED or FAILED.
        """
        process_instance = self.procs.get(process_id, None)
        if not process_instance:
            raise BadRequest("Cannot terminate. Process id='%s' unknown on container id='%s'" % (
                                        process_id, self.container.id))

        log.info("ProcManager.terminate_process: %s -> pid=%s", process_instance._proc_name, process_id)

        if do_notifications:
            self._call_proc_state_changed(process_instance, ProcessStateEnum.TERMINATING)

        self._process_quit(process_instance)

        self._unregister_process(process_id, process_instance)

        if do_notifications:
            self._call_proc_state_changed(process_instance, ProcessStateEnum.TERMINATED)
Example No. 2
    def _execute(self, cprefix, command):
        if not command:
            raise iex.BadRequest("execute argument 'command' not present")
        if not command.command:
            raise iex.BadRequest("command not set")

        cmd_res = IonObject("AgentCommandResult", command_id=command.command_id, command=command.command)
        cmd_func = getattr(self, cprefix + str(command.command), None)
        if cmd_func:
            cmd_res.ts_execute = get_ion_ts()
            try:
                res = cmd_func(*command.args, **command.kwargs)
                cmd_res.status = 0
                cmd_res.result = res
            except Exception as ex:
                # TODO: Distinguish application vs. uncaught exception
                cmd_res.status = getattr(ex, 'status_code', -1)
                cmd_res.result = str(ex)
                log.info("Agent function failed with ex=%s msg=%s" % (type(ex), str(ex)))
        else:
            log.info("Agent command not supported: %s" % (command.command))
            ex = iex.NotFound("Command not supported: %s" % command.command)
            cmd_res.status = iex.NotFound.status_code
            cmd_res.result = str(ex)
        return cmd_res
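
The lookup above builds a handler name by concatenating cprefix with the command name and resolving it via getattr, falling back to a NotFound result. A minimal standalone sketch of the same prefix-dispatch idea (the Agent class, rcmd_ prefix, and status values here are hypothetical, not the ION agent API):

    class Agent(object):
        # handler resolved as cprefix + command name, mirroring the getattr above
        def rcmd_ping(self):
            return "pong"

        def execute(self, cprefix, command):
            cmd_func = getattr(self, cprefix + command, None)
            if cmd_func is None:
                return {"status": 404, "result": "Command not supported: %s" % command}
            try:
                return {"status": 0, "result": cmd_func()}
            except Exception as ex:
                # same status_code fallback the agent code uses
                return {"status": getattr(ex, "status_code", -1), "result": str(ex)}

    print(Agent().execute("rcmd_", "ping"))   # {'status': 0, 'result': 'pong'}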
Example No. 3
    def on_start(self):
        '''
        Starts the process
        '''
        log.info('Replay Process Started')
        super(ReplayProcess,self).on_start()
        dsm_cli = DatasetManagementServiceProcessClient(process=self)
        pubsub  = PubsubManagementServiceProcessClient(process=self)

        self.dataset_id      = self.CFG.get_safe('process.dataset_id', None)
        self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
        self.start_time      = self.CFG.get_safe('process.query.start_time', None)
        self.end_time        = self.CFG.get_safe('process.query.end_time', None)
        self.stride_time     = self.CFG.get_safe('process.query.stride_time', None)
        self.parameters      = self.CFG.get_safe('process.query.parameters',None)
        self.publish_limit   = self.CFG.get_safe('process.query.publish_limit', 10)
        self.tdoa            = self.CFG.get_safe('process.query.tdoa',None)
        self.stream_id       = self.CFG.get_safe('process.publish_streams.output', '')
        self.stream_def      = pubsub.read_stream_definition(stream_id=self.stream_id)
        self.stream_def_id   = self.stream_def._id

        self.publishing.clear()
        self.play.set()
        self.end.clear()

        if self.dataset_id is None:
            raise BadRequest('dataset_id not specified')

        self.dataset = dsm_cli.read_dataset(self.dataset_id)
        self.pubsub = PubsubManagementServiceProcessClient(process=self)
Example No. 4
    def terminate_process(self, process_id, do_notifications=True):
        """
        Terminates a process and all its resources. Termination is graceful with timeout.

        @param  process_id          The id of the process to terminate. Should exist in the container's
                                    list of processes or this will raise.
        @param  do_notifications    If True, emits process state changes for TERMINATING and TERMINATED.
                                    If False, suppresses any state change notifications. Used when the
                                    process is already EXITED or FAILED.
        """
        process_instance = self.procs.get(process_id, None)
        if not process_instance:
            raise BadRequest(
                "Cannot terminate. Process id='%s' unknown on container id='%s'"
                % (process_id, self.container.id))

        log.info("ProcManager.terminate_process: %s -> pid=%s",
                 process_instance._proc_name, process_id)

        if do_notifications:
            self._call_proc_state_changed(process_instance,
                                          ProcessStateEnum.TERMINATING)

        self._process_quit(process_instance)

        self._unregister_process(process_id, process_instance)

        if do_notifications:
            self._call_proc_state_changed(process_instance,
                                          ProcessStateEnum.TERMINATED)
Example No. 5
    def on_initial_bootstrap(self, process, config, **kwargs):

        if os.environ.get('PYCC_MODE'):
            # This environment is an ion integration test
            log.info('PYCC_MODE: skipping qc_post_processor launch')
            return
        if self.process_exists(process, 'qc_post_processor'):
            # Short circuit the bootstrap to make sure not more than one is ever started
            return

        self.scheduler_service = SchedulerServiceProcessClient(process=process)
        self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)
        self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

        interval_key = uuid4().hex # Unique identifier for this process

        config = DotDict()
        config.process.interval_key = interval_key

        process_definition = ProcessDefinition(name='qc_post_processor',
            executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
        process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

        process_id = self.process_dispatcher.create_process(process_definition_id)
        self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)


        timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
                end_time='-1', #Run FOREVER
                interval=3600*self.run_interval,
                event_origin=interval_key)
Example No. 6
    def update_data_process_inputs(self, data_process_id="", in_stream_ids=None):
        #@TODO: INPUT STREAM VALIDATION
        log.debug("Updating inputs to data process '%s'", data_process_id)
        data_process_obj = self.clients.resource_registry.read(data_process_id)
        subscription_id = data_process_obj.input_subscription_id
        was_active = False 
        if subscription_id:
            # get rid of all the current streams
            try:
                log.debug("Deactivating subscription '%s'", subscription_id)
                self.clients.pubsub_management.deactivate_subscription(subscription_id)
                was_active = True

            except BadRequest:
                log.info('Subscription was not active')

            self.clients.pubsub_management.delete_subscription(subscription_id)

        new_subscription_id = self.clients.pubsub_management.create_subscription(data_process_obj.name,
                                                                                 stream_ids=in_stream_ids)
        data_process_obj.input_subscription_id = new_subscription_id

        self.clients.resource_registry.update(data_process_obj)

        if was_active:
            log.debug("Activating subscription '%s'", new_subscription_id)
            self.clients.pubsub_management.activate_subscription(new_subscription_id)
Example No. 7
    def check_fill_values(self):
        log.info('check_fill_values')
        self.new_rdt()
        self.init_check()
        self.rdt['time'] = np.arange(5)
        self.rdt['temp'] = [12] * 5
        self.rdt.fetch_lookup_values()

        np.testing.assert_array_equal(self.rdt['tempwat_glblrng_qc'],
                                      [-99] * 5)
        np.testing.assert_array_equal(self.rdt['tempwat_spketst_qc'],
                                      [-99] * 5)
        np.testing.assert_array_equal(self.rdt['tempwat_stuckvl_qc'],
                                      [-99] * 5)
        np.testing.assert_array_equal(self.rdt['tempwat_trndtst_qc'],
                                      [-99] * 5)
        np.testing.assert_array_equal(self.rdt['tempwat_gradtst_qc'],
                                      [-99] * 5)
        self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)

        self.dataset_monitor.event.wait(10)
        rdt = RecordDictionaryTool.load_from_granule(
            self.data_retriever.retrieve(self.dataset_id))
        np.testing.assert_array_equal(rdt['tempwat_glblrng_qc'], [-99] * 5)
        np.testing.assert_array_equal(rdt['tempwat_spketst_qc'], [-99] * 5)
        np.testing.assert_array_equal(rdt['tempwat_stuckvl_qc'], [-99] * 5)
        np.testing.assert_array_equal(rdt['tempwat_trndtst_qc'], [-99] * 5)
        np.testing.assert_array_equal(rdt['tempwat_gradtst_qc'], [-99] * 5)
Example No. 8
    def update_notification(self, notification=None):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification    NotificationRequest
        @throws BadRequest    if object does not have _id or _rev attribute
        @throws NotFound    object with specified id does not exist
        @throws Conflict    object not based on latest persisted object version
        """
        # Read existing Notification object and see if it exists
        notification_id = notification._id
        old_notification = self.event_processors[notification_id].notification._res_obj

        if not old_notification:
            raise NotFound("UserNotificationService.update_notification(): Notification %s does not exist" % notification_id)

        # check to see if the new notification is different than the old notification only in the delivery config fields
        if notification.origin != old_notification.origin or \
                notification.origin_type != old_notification.origin_type or \
                        notification.event_type != old_notification.event_type or \
                                notification.event_subtype != old_notification.event_subtype:


            log.info('Update unsuccessful. Only the delivery config is allowed to be modified!')
            raise BadRequest('Can not update the subscription for an event notification')

        else: # only the delivery_config is being modified, so we can go ahead with the update...
            _event_processor = self.event_processors[notification_id]
            _event_processor.notification = notification
            _event_processor.notification.set_notification_id(notification_id)
            # finally update the notification in the RR
            self.clients.resource_registry.update(notification)
            log.debug('Updated notification object with id: %s' % notification_id)
Example No. 9
    def stop(self, force=False):
        """
        Stop the driver process.  We try to stop gracefully using the driver client if we can, otherwise a simple kill
        does the job.
        """

        if self._driver_process:

            if not force and self._driver_client:
                try:
                    self._driver_client.done()
                    self._driver_process.wait()
                    log.info('Driver process stopped.')
                except Exception:
                    try:
                        self._driver_process.kill()
                        self._driver_process.wait()
                        log.error('Driver process killed.')
                    except Exception as ex:
                        log.error('Exception killing driver process')
                        log.error(type(ex))
                        log.error(ex)

            else:
                try:
                    self._driver_process.kill()
                    self._driver_process.wait()
                    log.error('Driver process killed.')
                except Exception as ex:
                    log.error('Exception killing driver process')
                    log.error(type(ex))
                    log.error(ex)

        self._driver_process = None
        self._driver_client = None
Example No. 10
    def memory_usage(self):
        """
        Get the current memory usage for the current driver process.
        @returns memory usage in KB of the current driver process
        """
        driver_pid = self.getpid()
        if not driver_pid:
            log.warn("no process running")
            return 0

        #ps_process = subprocess.Popen(["ps", "-p", self.getpid(), "-o", "rss,pid"])
        ps_process = subprocess.Popen(
            ["ps", "-o rss,pid", "-p %s" % self.getpid()],
            stdout=subprocess.PIPE)
        retcode = ps_process.poll()

        usage = 0
        for line in ps_process.stdout:
            if not line.strip().startswith('RSS'):
                try:
                    fields = line.split()
                    pid = int(fields[1])
                    if pid == driver_pid:
                        usage = int(fields[0])
                except:
                    log.warn("Failed to parse output for memory usage: %s" %
                             line)
                    usage = 0

        if usage:
            log.info("process memory usage: %dk" % usage)
        else:
            log.warn("process not running")

        return usage
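
A note on the ps invocation above: it passes "-o rss,pid" and "-p <pid>" as single argv tokens with embedded spaces, which most ps builds tolerate but is fragile. A POSIX-only sketch of the same RSS lookup with separate option tokens and a header-free format (an illustration, not the driver API):

    import os
    import subprocess

    def rss_kb(pid):
        # "rss=" prints the value with no header line, so nothing to skip
        try:
            out = subprocess.check_output(["ps", "-o", "rss=", "-p", str(pid)])
        except subprocess.CalledProcessError:
            return 0   # ps exits non-zero when the pid does not exist
        out = out.strip()
        return int(out) if out else 0

    print(rss_kb(os.getpid()))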
Example No. 11
def log_message(prefix="MESSAGE",
                msg=None,
                headers=None,
                recv=None,
                delivery_tag=None,
                is_send=True):
    """
    Utility function to print a legible comprehensive summary of a received message.
    @NOTE: This is an expensive operation
    """
    try:
        headers = headers or {}
        _sender = headers.get('sender', '?') + "(" + headers.get(
            'sender-name', '') + ")"
        _send_hl, _recv_hl = ("###", "") if is_send else ("", "###")

        if recv and getattr(recv, '__iter__', False):
            recv = ".".join(str(item) for item in recv if item)
        _recv = headers.get('receiver', '?')
        _opstat = "op=%s" % headers.get(
            'op', '') if 'op' in headers else "status=%s" % headers.get(
                'status_code', '')
        try:
            import msgpack
            _msg = msgpack.unpackb(msg)
            _msg = str(_msg)
        except Exception:
            _msg = str(msg)
        _msg = _msg[0:400] + "..." if len(_msg) > 400 else _msg
        _delivery = "\nDELIVERY: tag=%s" % delivery_tag if delivery_tag else ""
        log.info("%s: %s%s%s -> %s%s%s %s:\nHEADERS: %s\nCONTENT: %s%s",
                 prefix, _send_hl, _sender, _send_hl, _recv_hl, _recv,
                 _recv_hl, _opstat, str(headers), _msg, _delivery)
    except Exception as ex:
        log.warning("%s log error: %s", prefix, str(ex))
Example No. 12
 def execute_retrieve(self):
     '''
     execute_retrieve Executes a retrieval and returns the result 
     as a value in lieu of publishing it on a stream
     '''
     coverage = None
     try:
         coverage = DatasetManagementService._get_coverage(self.dataset_id,
                                                           mode='r')
         if coverage.num_timesteps == 0:
             log.info('Reading from an empty coverage')
             rdt = RecordDictionaryTool(
                 param_dictionary=coverage.parameter_dictionary)
         else:
             rdt = self._coverage_to_granule(coverage=coverage,
                                             start_time=self.start_time,
                                             end_time=self.end_time,
                                             stride_time=self.stride_time,
                                             parameters=self.parameters,
                                             tdoa=self.tdoa)
     except Exception:
         log.exception('Problems reading from the coverage')
         raise BadRequest('Problems reading from the coverage')
     finally:
         # coverage stays None if _get_coverage raised, so close() is skipped
         if coverage is not None:
             coverage.close(timeout=5)
     return rdt.to_granule()
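
Note the guard introduced above: coverage is initialized to None before the try so that the finally clause can skip close() when _get_coverage itself raises. The same open/read/always-close pattern in miniature (read_resource is a made-up helper):

    import io

    def read_resource(open_fn, read_fn):
        # handle stays None if open_fn raises, so finally can safely skip close()
        handle = None
        try:
            handle = open_fn()
            return read_fn(handle)
        finally:
            if handle is not None:
                handle.close()

    print(read_resource(lambda: io.StringIO(u"data"), lambda h: h.read()))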
Example No. 13
    def _transform(self, obj):
        # Note: This check to detect an IonObject is a bit risky (only type_)
        if isinstance(obj, dict) and "type_" in obj:
            objc  = obj
            otype = objc['type_'].encode('ascii')   # Correct?

            # don't supply a dict - we want the object to initialize with all its defaults intact,
            # which preserves things like IonEnumObject and invokes the setattr behavior we want there.
            ion_obj = self._obj_registry.new(otype)

            # get outdated attributes in data that are not defined in the current schema
            extra_attributes = objc.viewkeys() - ion_obj._schema.viewkeys() - BUILT_IN_ATTRS
            for extra in extra_attributes:
                objc.pop(extra)
                log.info('discard %s not in current schema' % extra)

            for k, v in objc.iteritems():
                # unicode translate to utf8
                if isinstance(v, unicode):
                    v = str(v.encode('utf8'))
                if k != "type_":
                    setattr(ion_obj, k, v)

            return ion_obj

        return obj
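
The pruning step above drops persisted keys that the current schema no longer defines before hydrating the object. The same idea without the ION object registry, using a hypothetical schema key set:

    BUILT_IN_ATTRS = set(["_id", "_rev"])   # bookkeeping keys excluded from the diff
    schema = set(["name", "age"])           # stand-in for ion_obj._schema keys

    def prune(doc):
        # discard attributes that are neither in the schema nor built-in
        for extra in set(doc) - schema - BUILT_IN_ATTRS:
            doc.pop(extra)
        return doc

    print(prune({"_id": "1", "name": "a", "obsolete_field": 42}))
    # -> {'_id': '1', 'name': 'a'}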
Example No. 14
File: cc.py Project: pkediyal/pyon
    def stop(self):
        log.info("=============== Container stopping... ===============")

        if self.event_pub is not None:
            try:
                self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                             origin=self.id, origin_type="CapabilityContainer",
                                             sub_type="TERMINATE",
                                             state=ContainerStateEnum.TERMINATE)
            except Exception as ex:
                log.exception(ex)

        while self._capabilities:
            capability = self._capabilities.pop()
            log.debug("stop(): Stopping '%s'" % capability)
            try:
                self._stop_capability(capability)
            except Exception as ex:
                log.exception("Container stop(): Error stop %s" % capability)

        Container.instance = None
        from pyon.core import bootstrap
        bootstrap.container_instance = None

        self._is_started = False

        log.debug("Container stopped, OK.")
Example No. 15
    def check_localrange(self):
        log.info('check_localrange')
        TestQCFunctions.check_localrange(self)
        self.init_check()

        flagged = Event()

        def cb(event, *args, **kwargs):
            times = event.temporal_values
            if not event.qc_parameter == 'tempwat_loclrng_qc':
                return
            np.testing.assert_array_equal(
                times,
                np.array([
                    3580144708.7555027, 3580144709.7555027, 3580144710.7555027,
                    3580144711.7555027, 3580144712.7555027
                ]))
            flagged.set()

        event_subscriber = EventSubscriber(event_type=OT.ParameterQCEvent,
                                           origin=self.dp_id,
                                           callback=cb,
                                           auto_delete=True)
        event_subscriber.start()
        self.addCleanup(event_subscriber.stop)

        self.ph.publish_rdt_to_data_product(self.dp_id, self.rdt)
        self.dataset_monitor.event.wait(10)
        rdt = RecordDictionaryTool.load_from_granule(
            self.data_retriever.retrieve(self.dataset_id))
        np.testing.assert_array_almost_equal(rdt['tempwat_loclrng_qc'],
                                             [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        self.assertTrue(flagged.wait(10))
Example No. 16
    def _cleanup_outdated_entries(self, dir_entries, common="key"):
        """
        This function takes all DirEntry from the list and removes all but the most recent one
        by ts_updated timestamp. It returns the most recent DirEntry and removes the others by
        direct datastore operations.
        """
        if not dir_entries:
            return
        newest_entry = dir_entries[0]
        try:
            remove_list = []
            for de in dir_entries:
                if int(de.ts_updated) > int(newest_entry.ts_updated):
                    remove_list.append(newest_entry)
                    newest_entry = de
                elif de.key != newest_entry.key:
                    remove_list.append(de)

            log.info("Attempting to cleanup these directory entries: %s" % remove_list)
            for de in remove_list:
                try:
                    self.dir_store.delete(de)
                except Exception as ex:
                    log.warn("Removal of outdated %s directory entry failed: %s" % (common, de))
            log.info("Cleanup of %s old %s directory entries succeeded" % (len(remove_list), common))

        except Exception as ex:
            log.warn("Cleanup of multiple directory entries for %s failed: %s" % (
                common, str(ex)))

        return newest_entry
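
The loop above keeps the entry with the largest ts_updated and queues the rest for deletion. The selection itself can be written with max(); a simplified sketch using a stand-in entry type (it removes everything but the newest, ignoring the key comparison in the original):

    from collections import namedtuple

    DirEntry = namedtuple("DirEntry", ["key", "ts_updated"])

    def split_newest(entries):
        # return (newest entry, stale entries to delete) by ts_updated
        if not entries:
            return None, []
        newest = max(entries, key=lambda e: int(e.ts_updated))
        return newest, [e for e in entries if e is not newest]

    newest, stale = split_newest(
        [DirEntry("a", "100"), DirEntry("a", "250"), DirEntry("a", "90")])
    print(newest.ts_updated, len(stale))   # 250 2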
Example No. 17
 def _setup_session(self):
     # negotiate with the telnet client to have server echo characters
     response = input = ''
     # set socket to non-blocking
     self.connection_socket.setblocking(0)
     start_time = time.time()
     self._write(self.WILL_ECHO_CMD)
     while True:
         try:
             input = self.connection_socket.recv(100)
         except gevent.socket.error as error:
             if error[0] == errno.EAGAIN:
                 gevent.sleep(.1)
             else:
                 log.info(
                     "TcpServer._setup_session(): exception caught <%s>" %
                     str(error))
                 self._exit_handler("lost connection")
                 return False
         if len(input) > 0:
             response += input
         if self.DO_ECHO_CMD in response:
             # set socket back to blocking
             self.connection_socket.setblocking(1)
             return True
         elif time.time() - start_time > 5:
             self._exit_handler("session setup timed out")
             self._writeline(
                 "session negotiation with telnet client failed, closing connection"
             )
             return False
Example No. 18
    def _message_received(self, msg, headers):
        """
        Internal _message_received override.

        We need to be able to detect IonExceptions raised in the Interceptor stacks as well as in the actual
        call to the op we're routing into. This override will handle the return value being sent to the caller.
        """
        result = None
        try:
            result, response_headers = ResponseEndpointUnit._message_received(self, msg, headers)       # execute interceptor stack, calls into our message_received
        except IonException as ex:
            (exc_type, exc_value, exc_traceback) = sys.exc_info()
            tb_list = traceback.format_list(traceback.extract_tb(exc_traceback))
            tb_output = "".join(tb_list)
            log.debug("Got error response")
            log.debug("Exception message: %s" % ex)
            log.debug("Traceback:\n%s" % tb_output)
            response_headers = self._create_error_response(ex)

        # REPLIES: propagate protocol, conv-id, conv-seq
        response_headers['protocol']    = headers.get('protocol', '')
        response_headers['conv-id']     = headers.get('conv-id', '')
        response_headers['conv-seq']    = headers.get('conv-seq', 1) + 1

        log.info("MESSAGE SEND [S->D] RPC: %s" % str(msg))

        return self.send(result, response_headers)
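
The traceback assembly above can also be done in one call with the stdlib's format_exception, which additionally includes the exception line itself; a small sketch:

    import sys
    import traceback

    try:
        raise ValueError("boom")
    except ValueError:
        tb_output = "".join(traceback.format_exception(*sys.exc_info()))
        print(tb_output)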
Example No. 19
    def on_start(self):
        '''
        Starts the process
        '''
        log.info('Replay Process Started')
        super(ReplayProcess,self).on_start()
        dsm_cli = DatasetManagementServiceProcessClient(process=self)
        pubsub  = PubsubManagementServiceProcessClient(process=self)

        self.dataset_id      = self.CFG.get_safe('process.dataset_id', None)
        self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
        self.start_time      = self.CFG.get_safe('process.query.start_time', None)
        self.end_time        = self.CFG.get_safe('process.query.end_time', None)
        self.stride_time     = self.CFG.get_safe('process.query.stride_time', None)
        self.parameters      = self.CFG.get_safe('process.query.parameters',None)
        self.publish_limit   = self.CFG.get_safe('process.query.publish_limit', 10)
        self.tdoa            = self.CFG.get_safe('process.query.tdoa',None)
        self.stream_id       = self.CFG.get_safe('process.publish_streams.output', '')
        self.stream_def      = pubsub.read_stream_definition(stream_id=self.stream_id)
        self.stream_def_id   = self.stream_def._id
        self.replay_thread   = None

        self.publishing.clear()
        self.play.set()
        self.end.clear()

        if self.dataset_id is None:
            raise BadRequest('dataset_id not specified')

        self.dataset = dsm_cli.read_dataset(self.dataset_id)
        self.pubsub = PubsubManagementServiceProcessClient(process=self)
Example No. 20
    def delete_doc(self, doc, datastore_name=""):
        if not datastore_name:
            datastore_name = self.datastore_name
        try:
            db = self.server[datastore_name]
        except ValueError:
            raise BadRequest("Data store name %s is invalid" % datastore_name)
        if type(doc) is str:
            log.info('Deleting object %s/%s' % (datastore_name, doc))
            if self._is_in_association(doc, datastore_name):
                obj = self.read(doc, datastore_name)
                log.warn("XXXXXXX Attempt to delete object %s that still has associations" % str(obj))
#                raise BadRequest("Object cannot be deleted until associations are broken")
            try:
                del db[doc]
            except ResourceNotFound:
                raise NotFound('Object with id %s does not exist.' % str(doc))
        else:
            log.info('Deleting object %s/%s' % (datastore_name, doc["_id"]))
            if self._is_in_association(doc["_id"], datastore_name):
                log.warn("XXXXXXX Attempt to delete object %s that still has associations" % str(doc))
#                raise BadRequest("Object cannot be deleted until associations are broken")
            try:
                res = db.delete(doc)
            except ResourceNotFound:
                raise NotFound('Object with id %s does not exist.' % str(doc["_id"]))
            log.debug('Delete result: %s' % str(res))
Example No. 21
 def _clean(cls, config):
     """ Force cleans the FS root, but not any other mappings """
     if not cls.root:
         cls.root = cls._get_fs_root(config)
     log.info('Removing %s', cls.root)
     if os.path.exists(cls.root):
         shutil.rmtree(cls.root)
Example No. 22
    def _send(self, msg, headers=None, **kwargs):
        log.info("MESSAGE SEND [S->D] RPC: %s" % str(msg))

        try:
            res, res_headers = RequestEndpointUnit._send(self,
                                                         msg,
                                                         headers=headers,
                                                         **kwargs)
        except exception.Timeout:
            self._sample_request(-1, 'Timeout', msg, headers, '', {})
            raise

        # possibly sample before we do any raising
        self._sample_request(res_headers['status_code'],
                             res_headers['error_message'], msg, headers, res,
                             res_headers)

        # Check response header
        if res_headers["status_code"] != 200:
            log.debug("RPCRequestEndpointUnit received an error (%d): %s",
                      res_headers['status_code'], res_headers['error_message'])
            self._raise_exception(res_headers["status_code"],
                                  res_headers["error_message"])

        return res, res_headers
Example No. 23
    def _execute(self, cprefix, command):
        if not command:
            raise iex.BadRequest("execute argument 'command' not present")
        if not command.command:
            raise iex.BadRequest("command not set")

        cmd_res = IonObject("AgentCommandResult", command_id=command.command_id, command=command.command)
        cmd_func = getattr(self, cprefix + str(command.command), None)
        if cmd_func:
            cmd_res.ts_execute = get_ion_ts()
            try:
                res = cmd_func(*command.args, **command.kwargs)
                cmd_res.status = 0
                cmd_res.result = res
            except iex.IonException as ex:
                # TODO: Distinguish application vs. uncaught exception
                cmd_res.status = getattr(ex, 'status_code', -1)
                cmd_res.result = str(ex)
                log.warn("Agent command %s failed with trace=%s" % (command.command, traceback.format_exc()))
        else:
            log.info("Agent command not supported: %s" % (command.command))
            ex = iex.NotFound("Command not supported: %s" % command.command)
            cmd_res.status = iex.NotFound.status_code
            cmd_res.result = str(ex)

        sub_type = "%s.%s" % (command.command, cmd_res.status)
        post_event = self._event_publisher._create_event(event_type=self.COMMAND_EVENT_TYPE,
                                origin=self.resource_id, origin_type=self.ORIGIN_TYPE,
                                sub_type=sub_type, command=command, result=cmd_res)
        post_event = self._post_execute_event_hook(post_event)
        success = self._event_publisher._publish_event(post_event, origin=post_event.origin)

        return cmd_res
Example No. 24
 def add_stream(self, dataset_id='', stream_id=''):
     log.info('Adding stream %s to dataset %s', stream_id, dataset_id)
     validate_true(
         dataset_id and stream_id,
         'Clients must provide both the dataset_id and stream_id')
     self.clients.resource_registry.create_association(
         subject=dataset_id, predicate=PRED.hasStream, object=stream_id)
Example No. 25
def time_it(msg="step"):
    t1 = time.time()
    try:
        yield
    finally:
        t2 = time.time()
        log.info("Time %s: %1.7f", msg, (t2 - t1))
Example No. 26
    def persist_data_stream(self,
                            stream_id='',
                            ingestion_configuration_id='',
                            dataset_id='',
                            config=None):
        #--------------------------------------------------------------------------------
        # Validate that the method call was indeed valid
        #--------------------------------------------------------------------------------
        config = config or {}
        validate_is_instance(stream_id, basestring,
                             'stream_id %s is not a valid string' % stream_id)
        validate_true(dataset_id,
                      'Clients must specify the dataset to persist')
        log.info('Persisting stream %s to dataset %s.', stream_id, dataset_id)

        ingestion_config = self.read_ingestion_configuration(
            ingestion_configuration_id)
        if self.is_persisted(stream_id):
            raise BadRequest('This stream is already being persisted')
        #--------------------------------------------------------------------------------
        # Set up the stream subscriptions and associations for this stream and its ingestion_type
        #--------------------------------------------------------------------------------
        if self.setup_queues(ingestion_config, stream_id, dataset_id, config):
            self.clients.pubsub_management.persist_stream(stream_id)

        return dataset_id
Example No. 27
    def check_localrange(self):
        log.info('check_localrange')
        self.new_rdt()
        t = np.array([3580144703.7555027, 3580144704.7555027, 3580144705.7555027, 3580144706.7555027, 3580144707.7555027, 3580144708.7555027, 3580144709.7555027, 3580144710.7555027, 3580144711.7555027, 3580144712.7555027])
        pressure = np.random.rand(10) * 2 + 33.0
        t_v = ntp_to_month(t)
        dat = t_v + pressure + np.arange(16,26)
        def lim1(p,m):
            return p+m+10
        def lim2(p,m):
            return p+m+20

        pressure_grid, month_grid = np.meshgrid(np.arange(0,150,10), np.arange(11))
        points = np.column_stack([pressure_grid.flatten(), month_grid.flatten()])
        datlim_0 = lim1(points[:,0], points[:,1])
        datlim_1 = lim2(points[:,0], points[:,1])
        datlim = np.column_stack([datlim_0, datlim_1])
        datlimz = points

        self.svm.stored_value_cas('lrt_QCTEST_TEMPWAT', {'datlim':datlim.tolist(), 'datlimz':datlimz.tolist(), 'dims':['pressure', 'month']})
        self.rdt['time'] = t
        self.rdt['temp'] = dat
        self.rdt['pressure'] = pressure
        
        self.rdt.fetch_lookup_values()

        np.testing.assert_array_equal(self.rdt['tempwat_loclrng_qc'], [1 ,1 ,1 ,1 ,1 ,0 ,0 ,0 ,0 ,0])
Example No. 28
    def _read_interval_time(self):
        """
        Reads the hsflowd conf file to determine what time should be used.
        """
        if not (self._hsflowd_addr == "localhost"
                or self._hsflowd_addr == "127.0.0.1"):
            log.debug(
                "Skipping reading hsflow auto file, hsflowd is not running locally"
            )
        else:
            try:
                mtime = os.stat(self._hsflowd_conf).st_mtime
            except OSError:
                # if you can't stat it, you can't read it most likely
                log.info("Could not stat hsflowd.auto file")
                return

            if mtime != self._conf_last_mod:
                self._conf_last_mod = mtime

                # appears to be simple key=value, one per line
                try:
                    with open(self._hsflowd_conf) as f:
                        while True:
                            c = f.readline()
                            if c == "":
                                break
                            elif c.startswith('polling='):
                                self._counter_interval = int(
                                    c.rstrip().split('=')[1])
                                log.debug("New polling interval time: %d",
                                          self._counter_interval)
                                break
                except IOError:
                    log.exception("Could not open/read hsflowd.auto")
Example No. 29
    def _create_xn(self, xn_type, name, xs=None, use_ems=True, **kwargs):
        xs = xs or self.default_xs
        log.info(
            "ExchangeManager._create_xn: type: %s, name=%s, xs=%s, kwargs=%s",
            xn_type, name, xs, kwargs)

        if xn_type == "service":
            xn = ExchangeNameService(self, self._priviledged_transport, name,
                                     xs, **kwargs)
        elif xn_type == "process":
            xn = ExchangeNameProcess(self, self._priviledged_transport, name,
                                     xs, **kwargs)
        elif xn_type == "queue":
            xn = ExchangeNameQueue(self, self._priviledged_transport, name, xs,
                                   **kwargs)
        else:
            raise StandardError("Unknown XN type: %s" % xn_type)

        self.xn_by_name[name] = xn

        if use_ems and self._ems_available():
            log.debug("Using EMS to create_xn")
            xno = ResExchangeName(name=name, xn_type=xn.xn_type)

            self._ems_client.declare_exchange_name(
                xno,
                self._get_xs_obj(xs._exchange)._id,
                headers=self._build_security_headers(
                ))  # @TODO: exchange is wrong
        else:
            self._ensure_default_declared()
            xn.declare()

        return xn
Example No. 30
File: cc.py Project: pkediyal/pyon
    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
Example No. 31
    def publish_event(self,
                      event_type='',
                      origin='',
                      origin_type='',
                      sub_type='',
                      description='',
                      event_attrs=None):
        """
        This service operation assembles a new Event object based on event_type
        (e.g. via the pyon Event publisher) with optional additional attributes from an event_attrs
        dict of arbitrary attributes.
        
        
        @param event_type   str
        @param origin       str
        @param origin_type  str
        @param sub_type     str
        @param description  str
        @param event_attrs  dict
        @retval event       !Event
        """
        event_attrs = event_attrs or {}

        event = self.event_publisher.publish_event(event_type=event_type,
                                                   origin=origin,
                                                   origin_type=origin_type,
                                                   sub_type=sub_type,
                                                   description=description,
                                                   **event_attrs)
        log.info(
            "The publish_event() method of UNS was used to publish an event: %s",
            event)

        return event
Example No. 32
 def _splice_coverage(cls, dataset_id, scov):
     file_root = FileSystem.get_url(FS.CACHE,'datasets')
     vcov = cls._get_coverage(dataset_id,mode='a')
     scov_pth = scov.persistence_dir
     if isinstance(vcov.reference_coverage, SimplexCoverage):
         ccov = ComplexCoverage(file_root, uuid4().hex, 'Complex coverage for %s' % dataset_id, 
                 reference_coverage_locs=[vcov.head_coverage_path,],
                 parameter_dictionary=ParameterDictionary(),
                 complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
         log.info('Creating Complex Coverage: %s', ccov.persistence_dir)
         ccov.append_reference_coverage(scov_pth)
         ccov_pth = ccov.persistence_dir
         ccov.close()
         vcov.replace_reference_coverage(ccov_pth)
     elif isinstance(vcov.reference_coverage, ComplexCoverage):
         log.info('Appending simplex coverage to complex coverage')
         #vcov.reference_coverage.append_reference_coverage(scov_pth)
         dir_path = vcov.reference_coverage.persistence_dir
         vcov.close()
         ccov = AbstractCoverage.load(dir_path, mode='a')
         ccov.append_reference_coverage(scov_pth)
         ccov.refresh()
         ccov.close()
     vcov.refresh()
     vcov.close()
Example No. 33
File: object.py Project: ooici/pyon
    def _transform(self, obj):
        # Note: This check to detect an IonObject is a bit risky (only type_)
        if isinstance(obj, dict) and "type_" in obj:
            objc  = obj
            otype = objc['type_'].encode('ascii')   # Correct?

            # don't supply a dict - we want the object to initialize with all its defaults intact,
            # which preserves things like IonEnumObject and invokes the setattr behavior we want there.
            ion_obj = self._obj_registry.new(otype)

            # get outdated attributes in data that are not defined in the current schema
            extra_attributes = objc.viewkeys() - ion_obj._schema.viewkeys() - BUILT_IN_ATTRS
            for extra in extra_attributes:
                objc.pop(extra)
                log.info('discard %s not in current schema' % extra)

            for k, v in objc.iteritems():

                # unicode translate to utf8
                if isinstance(v, unicode):
                    v = str(v.encode('utf8'))

                # CouchDB adds _attachments and puts metadata in it
                # in pyon metadata is in the document
                # so we discard _attachments while transforming between the two
                if k not in ("type_", "_attachments", "_conflicts"):
                    setattr(ion_obj, k, v)
                if k == "_conflicts":
                    log.warn("CouchDB conflict detected for ID=%S (ignored): %s", obj.get('_id', None), v)

            return ion_obj

        return obj
Example No. 34
    def subscription_callback(self, message, headers):
        """
        This callback is given to all the event subscribers that this user wants notifications for.
        If this callback gets called, the user in this processor should get an email.
        """
        # The message body should only contain the event description for now and a standard header: "ION Event SMS"...

        log.debug("UserEventProcessor.subscription_callback(): message=" + str(message))
        log.debug("event type = " + str(message._get_type()))
        log.debug('type of message: %s' % type(message))

        time_stamp = str( datetime.fromtimestamp(time.mktime(time.gmtime(float(message.ts_created)/1000))))

        event = message.type_
        origin = message.origin
        description = message.description
        log.info("description: %s" % str(description))


        # build the email from the event content
        msg_body = "Description: %s" % description + '\r\n'

        msg_subject = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin
        msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = msg_sender
        msg['To'] = self.msg_recipient
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s"\
        %self.msg_recipient)
        self.smtp_client.sendmail(msg_sender, self.msg_recipient, msg.as_string())
Example No. 35
def time_it(msg="step"):
    t1 = time.time()
    try:
        yield
    finally:
        t2 = time.time()
        log.info("Time %s: %1.7f", msg, (t2-t1))
Example No. 36
def log_message(recv, msg, headers, delivery_tag=None):
    """
    Utility function to print a legible comprehensive summary of a received message.
    """
    if getattr(recv, '__iter__', False):
        recv = ".".join(str(item) for item in recv if item)
    log.info("MESSAGE RECV [S->%s]: len=%s, headers=%s", recv, len(str(msg)), headers)
Example No. 37
    def memory_usage(self):
        """
        Get the current memory usage for the current driver process.
        @returns memory usage in KB of the current driver process
        """
        driver_pid = self.getpid()
        if not driver_pid:
            log.warn("no process running")
            return 0

        #ps_process = subprocess.Popen(["ps", "-p", self.getpid(), "-o", "rss,pid"])
        ps_process = subprocess.Popen(["ps", "-o rss,pid", "-p %s" % self.getpid()], stdout=subprocess.PIPE)
        retcode = ps_process.poll()

        usage = 0
        for line in ps_process.stdout:
            if not line.strip().startswith('RSS'):
                try:
                    fields = line.split()
                    pid = int(fields[1])
                    if pid == driver_pid:
                        usage = int(fields[0])
                except:
                    log.warn("Failed to parse output for memory usage: %s" % line)
                    usage = 0

        if usage:
            log.info("process memory usage: %dk" % usage)
        else:
            log.warn("process not running")

        return usage
Example No. 38
    def start(self):

        log.debug("GovernanceController starting ...")

        self._CFG = CFG

        self.enabled = CFG.get_safe('interceptor.interceptors.governance.config.enabled', False)

        log.info("GovernanceInterceptor enabled: %s" % str(self.enabled))

        self.policy_event_subscriber = None

        #containers default to not Org Boundary and ION Root Org
        self._is_container_org_boundary = CFG.get_safe('container.org_boundary',False)
        self._container_org_name = CFG.get_safe('container.org_name', CFG.get_safe('system.root_org', 'ION'))
        self._container_org_id = None
        self._system_root_org_name = CFG.get_safe('system.root_org', 'ION')

        self._is_root_org_container = (self._container_org_name == self._system_root_org_name)

        if self.enabled:

            config = CFG.get_safe('interceptor.interceptors.governance.config')

            self.initialize_from_config(config)

            self.policy_event_subscriber = EventSubscriber(event_type=OT.PolicyEvent, callback=self.policy_event_callback)
            self.policy_event_subscriber.start()

            self.rr_client = ResourceRegistryServiceProcessClient(node=self.container.node, process=self.container)
            self.policy_client = PolicyManagementServiceProcessClient(node=self.container.node, process=self.container)
Example No. 39
 def deactivate(self):
     self.close()
     self._cbthread.join(timeout=5)
     self._cbthread.kill()
     self._cbthread = None
     log.info("EventSubscriber deactivated. Event pattern=%s" %
              self.binding)
Example No. 40
    def create_doc(self, doc, object_id=None, datastore_name=""):
        if not datastore_name:
            datastore_name = self.datastore_name
        if '_id' in doc:
            raise BadRequest("Doc must not have '_id'")
        if '_rev' in doc:
            raise BadRequest("Doc must not have '_rev'")

        if object_id:
            try:
                self.read(object_id, '', datastore_name)
                raise BadRequest("Object with id %s already exist" % object_id)
            except NotFound:
                pass

        # Assign an id to doc (recommended in CouchDB documentation)
        doc["_id"] = object_id or uuid4().hex
        log.info('Creating new object %s/%s' % (datastore_name, doc["_id"]))
        log.debug('create doc contents: %s', doc)

        # Save doc.  CouchDB will assign version to doc.
        try:
            res = self.server[datastore_name].save(doc)
        except ResourceNotFound:
            raise BadRequest("Data store %s does not exist" % datastore_name)
        except ResourceConflict:
            raise BadRequest("Object with id %s already exist" % doc["_id"])
        except ValueError:
            raise BadRequest("Data store name %s invalid" % datastore_name)
        log.debug('Create result: %s' % str(res))
        id, version = res
        return (id, version)
Example No. 41
    def __init__(self, host=None, port=None, datastore_name='prototype', options="", profile=DataStore.DS_PROFILE.BASIC):
        log.debug('__init__(host=%s, port=%s, datastore_name=%s, options=%s)', host, port, datastore_name, options)
        self.host = host or CFG.server.couchdb.host
        self.port = port or CFG.server.couchdb.port
        # The scoped name of the datastore
        self.datastore_name = datastore_name
        self.auth_str = ""
        try:
            if CFG.server.couchdb.username and CFG.server.couchdb.password:
                self.auth_str = "%s:%s@" % (CFG.server.couchdb.username, CFG.server.couchdb.password)
                log.debug("Using username:password authentication to connect to datastore")
        except AttributeError:
            log.error("CouchDB username:password not configured correctly. Trying anonymous...")

        connection_str = "http://%s%s:%s" % (self.auth_str, self.host, self.port)
        #connection_str = "http://%s:%s" % (self.host, self.port)
        # TODO: Security risk to emit password into log. Remove later.
        log.info('Connecting to CouchDB server: %s' % connection_str)
        self.server = couchdb.Server(connection_str)

        # Datastore specialization (views)
        self.profile = profile

        # serializers
        self._io_serializer     = IonObjectSerializer()
        # TODO: Not nice to have this class depend on ION objects
        self._io_deserializer   = IonObjectDeserializer(obj_registry=get_obj_registry())
        self._datastore_cache = {}
Example No. 42
    def __init__(self, host=None, port=None, datastore_name='prototype', options="", profile=DataStore.DS_PROFILE.BASIC):
        log.debug('__init__(host=%s, port=%s, datastore_name=%s, options=%s)' % (host, port, datastore_name, options))
        self.host = host or CFG.server.couchdb.host
        self.port = port or CFG.server.couchdb.port
        # The scoped name of the datastore
        self.datastore_name = datastore_name
        self.auth_str = ""
        try:
            if CFG.server.couchdb.username and CFG.server.couchdb.password:
                self.auth_str = "%s:%s@" % (CFG.server.couchdb.username, CFG.server.couchdb.password)
                log.debug("Using username:password authentication to connect to datastore")
        except AttributeError:
            log.error("CouchDB username:password not configured correctly. Trying anonymous...")

        connection_str = "http://%s%s:%s" % (self.auth_str, self.host, self.port)
        #connection_str = "http://%s:%s" % (self.host, self.port)
        # TODO: Security risk to emit password into log. Remove later.
        log.info('Connecting to CouchDB server: %s' % connection_str)
        self.server = couchdb.Server(connection_str)

        # Datastore specialization
        self.profile = profile

        # serializers
        self._io_serializer     = IonObjectSerializer()
        self._io_deserializer   = IonObjectDeserializer(obj_registry=obj_registry)
Example No. 43
 def _setup_session(self):
     # negotiate with the telnet client to have server echo characters
     response = input = ''
     # set socket to non-blocking
     self.connection_socket.setblocking(0)
     start_time = time.time()
     self._write(self.WILL_ECHO_CMD)
     while True:
         try:
             input = self.connection_socket.recv(100)
         except gevent.socket.error as error:
             if error[0] == errno.EAGAIN:
                 gevent.sleep(.1)
             else:
                 log.info("TcpServer._setup_session(): exception caught <%s>" %str(error))
                 self._exit_handler("lost connection")
                 return False
         if len(input) > 0:
             response += input
         if self.DO_ECHO_CMD in response:
             # set socket back to blocking
             self.connection_socket.setblocking(1)
             return True
         elif time.time() - start_time > 5:
             self._exit_handler("session setup timed out")
             self._writeline("session negotiation with telnet client failed, closing connection")
             return False            
Example No. 44
    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
Example No. 45
    def _load_capabilities(self):
        self._cap_initialized = []  # List of capability constants initialized in container
        self._capabilities = []     # List of capability constants active in container
        self._cap_instances = {}    # Dict mapping capability->manager instance

        self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

        profile_filename = CFG.get_safe("container.profile", "development")
        if not profile_filename.endswith(".yml"):
            profile_filename = "res/profile/%s.yml" % profile_filename
        log.info("Loading CC capability profile from file: %s", profile_filename)
        profile_cfg = Config([profile_filename]).data
        if not isinstance(profile_cfg, dict) or profile_cfg['type'] != "profile" or "profile" not in profile_cfg:
            raise ContainerError("Container capability profile invalid: %s" % profile_filename)

        self.cap_profile = profile_cfg['profile']

        if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
            dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

        CCAP.clear()
        cap_list = self._cap_definitions.keys()
        CCAP.update(zip(cap_list, cap_list))

        if "config" in self.cap_profile and self.cap_profile['config']:
            log.info("Container CFG was changed based on profile: %s", profile_filename)
Example No. 46
    def get_datastore(self, ds_name, profile=DataStore.DS_PROFILE.BASIC, config=None):
        """
        Factory method to get a datastore instance from given name, profile and config.
        @param ds_name  Logical name of datastore (will be scoped with sysname)
        @param profile  One of known constants determining the use of the store
        @param config  Override config to use
        """
        validate_true(ds_name, 'ds_name must be provided')
        if ds_name in self._datastores:
            log.debug("get_datastore(): Found instance of store '%s'" % ds_name)
            return self._datastores[ds_name]

        scoped_name = DatastoreManager.get_scoped_name(ds_name)

        # Create a datastore instance
        log.info("get_datastore(): Create instance of store '%s' as database=%s" % (ds_name, scoped_name))
        new_ds = DatastoreManager.get_datastore_instance(ds_name, profile)

        # Create store if not existing
        if not new_ds.datastore_exists(scoped_name):
            new_ds.create_datastore(scoped_name, create_indexes=True, profile=profile)
        else:
            # NOTE: This may be expensive if called more than once per container,
            # since existing views may be dropped and recreated
            new_ds._define_views(profile=profile, keepviews=True)

        # Set a few standard datastore instance fields
        new_ds.local_name = ds_name
        new_ds.ds_profile = profile

        self._datastores[ds_name] = new_ds

        return new_ds
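A hedged usage sketch of this factory (the datastore_manager attribute is an assumption based on pyon conventions; the document id is illustrative):

# Illustrative only: ds_name, profile constant and the read id are example values.
ds = container.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.BASIC)
doc = ds.read("some_doc_id")  # hypothetical document id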
Exemplo n.º 47
0
    def start_listeners(self):
        """
        Starts all listeners in managed greenlets.

        This must be called after starting this IonProcess. Currently, the Container's ProcManager
        will handle this for you, but if using an IonProcess manually, you must remember to call
        this method or no attached listeners will run.
        """
        try:
            # disable normal error reporting; this method should only be called from startup
            self.thread_manager._failure_notify_callback = None

            # spawn all listeners in startup listeners (from initializer, or added later)
            for listener in self._startup_listeners:
                self.add_endpoint(listener)

            with Timeout(10):
                waitall([x.get_ready_event() for x in self.listeners])

        except Timeout:

            # remove failed endpoints before reporting failure below
            for listener, proc in self._listener_map.iteritems():
                if proc.proc.dead:
                    log.info("removed dead listener: %s", listener)
                    self.listeners.remove(listener)
                    self.thread_manager.children.remove(proc)

            raise IonProcessError("start_listeners did not complete in expected time")

        finally:
            self.thread_manager._failure_notify_callback = self._child_failed
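As the docstring warns, a manually managed IonProcess must call start_listeners() itself; a minimal sketch, with the class name and constructor arguments assumed rather than taken from the source:

# Hedged sketch: IonProcessThread and its arguments are illustrative.
proc = IonProcessThread(name="manual-proc", listeners=[my_listener])
proc.start()
proc.start_listeners()  # without this call, attached listeners never run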
Exemplo n.º 48
0
    def publish_event(self, event_type='', origin='', origin_type='', sub_type='', description='', event_attrs=None):
        """
        This service operation assembles and publishes a new Event object of the given
        event_type (via the pyon EventPublisher), with optional additional attributes
        taken from an event_attrs dict of arbitrary attributes.

        @param event_type   str
        @param origin       str
        @param origin_type  str
        @param sub_type     str
        @param description  str
        @param event_attrs  dict
        @retval event       Event
        """
        event_attrs = event_attrs or {}

        event = self.event_publisher.publish_event(
            event_type=event_type,
            origin=origin,
            origin_type=origin_type,
            sub_type=sub_type,
            description=description,
            **event_attrs)
        log.info("The publish_event() method of UNS was used to publish an event: %s", event)

        return event
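A hedged call sketch: extra attributes ride along in event_attrs and are expanded as keyword arguments on the published event (client name and values are illustrative):

event = uns_client.publish_event(
    event_type="ResourceModifiedEvent",
    origin="resource_id_123",            # illustrative origin
    origin_type="InstrumentDevice",
    sub_type="UPDATE",
    description="Example notification",
    event_attrs={"new_name": "CTD-01"})  # becomes a keyword arg on the event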
Exemplo n.º 49
0
    def _splice_coverage(cls, dataset_id, scov):
        file_root = FileSystem.get_url(FS.CACHE, 'datasets')
        vcov = cls._get_coverage(dataset_id, mode='a')
        scov_pth = scov.persistence_dir
        if isinstance(vcov.reference_coverage, SimplexCoverage):
            ccov = ComplexCoverage(
                file_root,
                uuid4().hex,
                'Complex coverage for %s' % dataset_id,
                reference_coverage_locs=[
                    vcov.head_coverage_path,
                ],
                parameter_dictionary=ParameterDictionary(),
                complex_type=ComplexCoverageType.TEMPORAL_AGGREGATION)
            log.info('Creating Complex Coverage: %s', ccov.persistence_dir)
            ccov.append_reference_coverage(scov_pth)
            ccov_pth = ccov.persistence_dir
            ccov.close()
            vcov.replace_reference_coverage(ccov_pth)
        elif isinstance(vcov.reference_coverage, ComplexCoverage):
            log.info('Appending simplex coverage to complex coverage')
            #vcov.reference_coverage.append_reference_coverage(scov_pth)
            dir_path = vcov.reference_coverage.persistence_dir
            vcov.close()
            ccov = AbstractCoverage.load(dir_path, mode='a')
            ccov.append_reference_coverage(scov_pth)
            ccov.refresh()
            ccov.close()
        vcov.refresh()
        vcov.close()
Exemplo n.º 50
0
    def _force_clean(cls, recreate=False, initial=False):
        # Database resources
        from pyon.core.bootstrap import get_sys_name, CFG
        from pyon.datastore.datastore_common import DatastoreFactory
        datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
        if initial:
            datastore._init_database(datastore.database)

        dbs = datastore.list_datastores()
        clean_prefix = '%s_' % get_sys_name().lower()
        things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]
        try:
            for thing in things_to_clean:
                datastore.delete_datastore(datastore_name=thing)
                if recreate:
                    datastore.create_datastore(datastore_name=thing)

        finally:
            datastore.close()

        # Broker resources
        from putil.rabbitmq.rabbit_util import RabbitManagementUtil
        rabbit_util = RabbitManagementUtil(CFG, sysname=get_sys_name())
        deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
        log.info("Deleted %d exchanges, %d queues", len(deleted_exchanges), len(deleted_queues))

        # File system
        from pyon.util.file_sys import FileSystem
        FileSystem._clean(CFG)
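A hedged sketch of a typical call site for this cleanup helper, e.g. integration-test teardown (the owning class name is an assumption):

# Illustrative: wipe all sysname-scoped databases, queues and files between
# test runs, recreating empty datastores for the next one.
IonIntegrationTestCase._force_clean(recreate=True)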
Exemplo n.º 51
0
    def retrieve_oob(cls, dataset_id='', query=None, delivery_format=''):
        query = query or {}
        coverage = None
        try:
            coverage = cls._get_coverage(dataset_id)
            if coverage is None:
                raise BadRequest('no such coverage')
            if coverage.num_timesteps == 0:
                log.info('Reading from an empty coverage')
                rdt = RecordDictionaryTool(
                    param_dictionary=coverage.parameter_dictionary)
            else:
                rdt = ReplayProcess._coverage_to_granule(
                    coverage=coverage,
                    start_time=query.get('start_time', None),
                    end_time=query.get('end_time', None),
                    stride_time=query.get('stride_time', None),
                    parameters=query.get('parameters', None),
                    stream_def_id=delivery_format,
                    tdoa=query.get('tdoa', None))
        except Exception:
            cls._eject_cache(dataset_id)
            log.exception('Problems reading from the coverage')
            raise BadRequest('Problems reading from the coverage')
        return rdt.to_granule()
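A hedged sketch of invoking this classmethod out of band (the owning service class and the query values are illustrative):

granule = DataRetrieverService.retrieve_oob(
    dataset_id="dataset_123",            # illustrative id
    query={"start_time": 0, "end_time": 3600,
           "stride_time": 2, "parameters": ["time", "temp"]})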
Exemplo n.º 52
0
    def delete_doc(self, doc, datastore_name=""):
        if not datastore_name:
            datastore_name = self.datastore_name
        try:
            datastore_dict = self.root[datastore_name]
        except KeyError:
            raise BadRequest('Data store ' + datastore_name +
                             ' does not exist.')

        if isinstance(doc, str):
            object_id = doc
        else:
            object_id = doc["_id"]

        log.info('Deleting object %s/%s', datastore_name, object_id)
        if object_id in datastore_dict:

            if self._is_in_association(object_id, datastore_name):
                obj = self.read(object_id, "", datastore_name)
                log.warn("Attempt to delete object %s that still has associations", obj)
                #raise BadRequest("Object cannot be deleted until associations are broken")

            # Find all version dicts and delete them
            for key in datastore_dict.keys():
                if key.startswith(object_id + '_version_'):
                    del datastore_dict[key]
            # Delete the HEAD dict
            del datastore_dict[object_id]
            # Delete the version counter dict
            del datastore_dict['__' + object_id + '_version_counter']
        else:
            raise NotFound('Object with id ' + object_id + ' does not exist.')
        log.info('Delete result: True')
Exemplo n.º 53
0
    def suspend(self):
        '''
        Stops the event loop
        '''
        self.event.set()
        self._thread.join(self.timeout)
        log.info("QC Thread Suspended")
Exemplo n.º 54
0
    def _get_execution_engine_config(self):
        ee_base_cfg = CFG.get_safe("container.execution_engine") or {}
        if ee_base_cfg.get("type", None) != "scioncc":
            raise ContainerConfigError("Execution engine config invalid: %s" % ee_base_cfg)

        ee_cfg = deepcopy(ee_base_cfg)

        # If we are a child process, merge in child config override
        proc_name = multiprocessing.current_process().name
        ee_cfg["container"] = dict(child_proc_name=proc_name, is_child=False)
        child_cfgs = ee_base_cfg.get("child_configs", None) or {}
        if proc_name.startswith("Container-child-"):
            ee_cfg["container"]["is_child"] = True
            if proc_name in child_cfgs:
                log.info("Applying execution engine config override for child: %s", proc_name)
                dict_merge(ee_cfg, child_cfgs[proc_name], inplace=True)
            else:
                for cfg_name, ch_cfg in child_cfgs.iteritems():
                    pattern = ch_cfg.get("name_pattern", None)
                    if pattern and re.match(pattern, proc_name):
                        log.info("Applying execution engine config override %s for child: %s", cfg_name, proc_name)
                        dict_merge(ee_cfg, ch_cfg, inplace=True)
                        break

        ee_cfg.pop("child_configs", None)
        return ee_cfg
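A hedged sketch of the configuration shape this method consumes, with both an exact-name child override and a pattern-based one (keys inferred from the code above; values illustrative):

CFG_sketch = {
    "container": {
        "execution_engine": {
            "type": "scioncc",
            "child_configs": {
                # exact match for one specific child process
                "Container-child-1": {"queue_limit": 100},
                # pattern-based override for any matching child name
                "workers": {"name_pattern": r"Container-child-\d+",
                            "queue_limit": 50},
            },
        }
    }
}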
Exemplo n.º 55
0
    def check_localrange(self):
        log.info('check_localrange')
        self.new_rdt()
        t = np.array([3580144703.7555027, 3580144704.7555027, 3580144705.7555027, 3580144706.7555027, 3580144707.7555027, 3580144708.7555027, 3580144709.7555027, 3580144710.7555027, 3580144711.7555027, 3580144712.7555027])
        pressure = np.random.rand(10) * 2 + 33.0
        t_v = ntp_to_month(t)
        dat = t_v + pressure + np.arange(16,26)
        def lim1(p,m):
            return p+m+10
        def lim2(p,m):
            return p+m+20

        pressure_grid, month_grid = np.meshgrid(np.arange(0,150,10), np.arange(11))
        points = np.column_stack([pressure_grid.flatten(), month_grid.flatten()])
        datlim_0 = lim1(points[:,0], points[:,1])
        datlim_1 = lim2(points[:,0], points[:,1])
        datlim = np.column_stack([datlim_0, datlim_1])
        datlimz = points

        self.svm.stored_value_cas('lrt_QCTEST_TEMPWAT', {'datlim':datlim.tolist(), 'datlimz':datlimz.tolist(), 'dims':['pressure', 'month']})
        self.rdt['time'] = t
        self.rdt['temp'] = dat
        self.rdt['pressure'] = pressure
        
        self.rdt.fetch_lookup_values()

        np.testing.assert_array_equal(self.rdt['tempwat_loclrng_qc'], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
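For orientation, the expected flags follow directly from the limits above: a sample passes (1) only when its value lies within [lim1(p, m), lim2(p, m)] at its interpolated (pressure, month) point. Since dat = p + m + arange(16, 26) while the limits are p + m + 10 and p + m + 20, only the offsets up to 20 pass; a runnable back-of-envelope check:

import numpy as np
offsets = np.arange(16, 26)            # dat minus (pressure + month)
expected = ((offsets >= 10) & (offsets <= 20)).astype(int)
print(expected)                        # [1 1 1 1 1 0 0 0 0 0]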
Exemplo n.º 56
0
    def update_notification(self, notification=None, user_id=''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Get the old notification
        #-------------------------------------------------------------------------------------------------------------------

        old_notification = self.clients.resource_registry.read(notification._id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the notification in the notifications dict
        #-------------------------------------------------------------------------------------------------------------------


        self._update_notification_in_notifications_dict(new_notification=notification,
                                                        old_notification=old_notification,
                                                        notifications=self.notifications)
        #-------------------------------------------------------------------------------------------------------------------
        # Update the notification in the registry
        #-------------------------------------------------------------------------------------------------------------------

        self.clients.resource_registry.update(notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Read the notification object back to make sure we have the newly persisted version
        #-------------------------------------------------------------------------------------------------------------------

        notification_id = notification._id
        notification = self.clients.resource_registry.read(notification_id)

        #------------------------------------------------------------------------------------
        # Update the UserInfo object
        #------------------------------------------------------------------------------------

        user = self.update_user_info_object(user_id, notification, old_notification)

        #------------------------------------------------------------------------------------
        # Update the user_info dictionary maintained by UNS
        #------------------------------------------------------------------------------------

        self.update_user_info_dictionary(user_id, notification, old_notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by notification workers so that they can update their user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")

        self.event_publisher.publish_event(event_type="ReloadUserInfoEvent",
            origin="UserNotificationService",
            description="A notification has been updated."
        )