Example #1
    def __init__(self, host=None, port=None, datastore_name='prototype', options=""):
        log.debug('host %s port %s data store name %s options %s' % (host, port, datastore_name, options))
        try:
            self.host = host or CFG.server.couchdb.host
        except AttributeError:
            self.host = 'localhost'
        try:
            self.port = port or CFG.server.couchdb.port
        except AttributeError:
            self.port = 5984
        self.datastore_name = datastore_name
        self.auth_str = ""
        try:
            if CFG.server.couchdb.username and CFG.server.couchdb.password:
                self.auth_str = "%s:%s@" % (CFG.server.couchdb.username, CFG.server.couchdb.password)
                log.debug("Using username:password authentication to connect to datastore")
        except AttributeError:
            log.error("CouchDB username:password not configured correctly. Trying anonymous...")

        connection_str = "http://%s%s:%s" % (self.auth_str, self.host, self.port)
        #connection_str = "http://%s:%s" % (self.host, self.port)
        # TODO: Security risk to emit password into log. Remove later.
        log.info('Connecting to CouchDB server: %s' % connection_str)
        self.server = couchdb.Server(connection_str)

        # serializers
        self._io_serializer     = IonObjectSerializer()
        self._io_deserializer   = IonObjectDeserializer(obj_registry=obj_registry)
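The fallback chain above (explicit argument, then CFG lookup, then a hard default) plus the optional user:pass@ prefix is easy to exercise in isolation. A minimal sketch with the CFG lookup stubbed out; the function name is illustrative, not part of the datastore API:

import couchdb  # python-couchdb, as used above

def build_couchdb_url(host='localhost', port=5984, username=None, password=None):
    # Credentials become a "user:pass@" prefix only when both are present
    auth_str = ""
    if username and password:
        auth_str = "%s:%s@" % (username, password)
    return "http://%s%s:%s" % (auth_str, host, port)

print(build_couchdb_url())                                   # http://localhost:5984
print(build_couchdb_url(username='ion', password='secret'))  # http://ion:secret@localhost:5984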
Example #2
    def _evaluate_pdp(self, invocation, pdp, requestCtx):

        try:
            response = pdp.evaluate(requestCtx)
        except Exception as e:
            log.error("Error evaluating policies: %s" % e.message)
            return Decision.NOT_APPLICABLE
Example #3
    def _on_underlying_close(self, code, text):
        if not (code == 0 or code == 200):
            log.error(
                "AMQPTransport.underlying closed:\n\tchannel number: %s\n\tcode: %d\n\ttext: %s",
                self.channel_number, code, text)

        # PIKA BUG: in v0.9.5, this amq_chan instance will be left around in the callbacks
        # manager, and trips a bug in the handler for on_basic_deliver. We attempt to clean
        # up for Pika here so we don't goof up when reusing a channel number.

        # this appears to be fixed in 3050d116899aced2392def2e3e66ca30c93334ac
        # https://github.com/pika/pika/commit/e93c7ebae2c57b798977ba2992602310deb4758b
        self._client.callbacks.remove(self._client.channel_number,
                                      'Basic.GetEmpty')
        self._client.callbacks.remove(self._client.channel_number,
                                      'Channel.Close')
        self._client.callbacks.remove(self._client.channel_number,
                                      '_on_basic_deliver')
        self._client.callbacks.remove(self._client.channel_number,
                                      '_on_basic_get')

        # uncomment these lines to see the full callback list that Pika maintains
        #stro = pprint.pformat(callbacks._callbacks)
        #log.error(str(stro))

        for cb in self._close_callbacks:
            cb(self, code, text)
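Pika's CallbackManager keys callbacks by channel number and event name, which is why the channel number has to be scrubbed before it can be reused. A rough stand-in for that registry, assuming a dict-of-dicts layout (not pika's actual implementation):

class CallbackRegistry(object):
    def __init__(self):
        self._callbacks = {}

    def add(self, channel_number, event, callback):
        self._callbacks.setdefault(channel_number, {}).setdefault(event, []).append(callback)

    def remove(self, channel_number, event):
        # Skipping this step would leave stale callbacks behind for a reused
        # channel number, which is the bug being worked around above.
        self._callbacks.get(channel_number, {}).pop(event, None)

reg = CallbackRegistry()
reg.add(1, 'Channel.Close', lambda *a: None)
reg.remove(1, 'Channel.Close')
print(reg._callbacks)  # {1: {}}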
Example #4
    def _evaluate_pdp(self, pdp, requestCtx):

        try:
            response = pdp.evaluate(requestCtx)
        except Exception as e:
            log.error("Error evaluating policies: %s" % e.message)
            return Decision.NOT_APPLICABLE_STR
Example #5
 def load_dictionaries(self, path, contexts):
     try:
         body = None
         with open(path) as f:
             body = yaml.load(f)
         for name, context_names in body.iteritems():
             temporal_contexts = [
                 cname for cname in context_names if '*' in cname
             ]
             temporal_context = temporal_contexts[0].strip(
                 '*') if temporal_contexts else ''
             context_names = [
                 context_name
                 if '*' not in context_name else context_name.strip('*')
                 for context_name in context_names
             ]
             context_ids = [
                 contexts[i] for i in context_names if i in contexts
             ]
             self.dataset_management.create_parameter_dictionary(
                 name=name,
                 parameter_context_ids=context_ids,
                 temporal_context=temporal_context)
     except Exception as e:
         log.error('Problem loading dictionaries, stopping: %s', e.message)
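The '*' marker convention above (one starred name designates the temporal context, and stars are stripped from all names) can be seen in a standalone sketch; the context names here are made up:

context_names = ['time*', 'temperature', 'salinity']
temporal_contexts = [c for c in context_names if '*' in c]
temporal_context = temporal_contexts[0].strip('*') if temporal_contexts else ''
context_names = [c.strip('*') for c in context_names]  # strip('*') is a no-op on unstarred names
print(temporal_context)  # time
print(context_names)     # ['time', 'temperature', 'salinity']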
Example #6
    def _read_by_path(self, path, orgname=None, mult_keys=None):
        """
        Given a qualified path, find entry in directory and return DirEntry object or None if not found.
        """
        if path is None:
            raise BadRequest("Illegal arguments")
        orgname = orgname or self.orgname
        if mult_keys:
            parent = path or "/"
            key = mult_keys
        else:
            parent, key = path.rsplit("/", 1)
            parent = parent or "/"
        find_key = [orgname, key, parent]
        view_res = self.dir_store.find_by_view('directory', 'by_key', key=find_key, id_only=True, convert_doc=True)

        match = [doc for docid, index, doc in view_res]
        if mult_keys:
            entries_by_key = {doc.key: doc for doc in match}
            entries = [entries_by_key.get(key, None) for key in mult_keys]
            return entries
        else:
            if len(match) > 1:
                log.error("More than one directory entry found for key %s" % path)
                return match[0]
            elif match:
                return match[0]
            return None
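The parent/key split above follows a filesystem-like convention: everything before the last '/' is the parent (defaulting to the root '/'), the remainder is the key. A self-contained sketch:

def split_path(path):
    parent, key = path.rsplit("/", 1)
    return (parent or "/", key)

print(split_path("/Org/resources/mykey"))  # ('/Org/resources', 'mykey')
print(split_path("/toplevel"))             # ('/', 'toplevel')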
Example #7
    def trigger_policy_update(self, policy_id):

        try:

            #TODO - Find a better way to work with org_id - use ION Org for now.
            ion_org, _ = self.rr_client.find_resources(
                restype=RT.Org, name=CFG.system.root_org)

            resource_list, _ = self.rr_client.find_subjects(
                "", PRED.hasPolicy, policy_id)
            for res in resource_list:
                #TODO - may figure out a better way to get the name of the Resource Type - or maybe this is ok
                resource_type = res.__class__.__name__
                #log.debug("Resource Type: %s" % resource_type)
                if resource_type == 'ServiceDefinition':
                    policy_rules = self.policy_client.get_active_service_policy_rules(
                        ion_org[0]._id, res.name)
                    self.update_resource_policy(res.name, policy_rules)
                elif resource_type == 'Org':
                    self.update_all_resource_policy(res._id)
                else:
                    policy_rules = self.policy_client.get_active_resource_policy_rules(
                        res._id)
                    self.update_resource_policy(res._id, policy_rules)

        except Exception as e:
            log.error(e.message)
Example #8
    def outgoing(self, invocation):
        payload = invocation.message

        # Compliance: Make sure sent message objects support DotDict as arguments.
        # Although DotDict is subclass of dict, msgpack does not like it
        if isinstance(payload, IonMessageObjectBase):
            for k, v in payload.__dict__.iteritems():
                if isinstance(v, DotDict):
                    setattr(payload, k, v.as_dict())

        # Msgpack the content to binary str - does nested IonObject encoding
        try:
            invocation.message = msgpack.packb(payload, default=encode_ion)
        except Exception:
            log.error("Illegal type in IonObject attributes: %s", payload)
            raise BadRequest("Illegal type in IonObject attributes")

        # Make sure no Nones exist in headers - this indicates a problem somewhere up the stack.
        # pika will choke hard on them as well, masking the actual problem, so we catch here.
        nonelist = [(k, v) for k, v in invocation.headers.iteritems()
                    if v is None]
        if nonelist:
            raise BadRequest("Invalid headers containing None values: %s" %
                             str(nonelist))

        msg_size = len(invocation.message)
        if msg_size > self.max_message_size:
            raise BadRequest(
                'The message size %s is larger than the max_message_size value of %s'
                % (msg_size, self.max_message_size))

        return invocation
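The packb(..., default=...) call above has a decoding counterpart: unpackb takes an object_hook that undoes what the default hook did. A round-trip sketch with a toy codec for sets (encode_set/decode_set are illustrative, not pyon's encode_ion), assuming a msgpack-python version where strings round-trip as str:

import msgpack

def encode_set(obj):
    if isinstance(obj, set):
        return {'t': 'set', 'o': tuple(obj)}
    raise TypeError('Unknown type: %r' % (obj,))  # unknown types must raise

def decode_set(obj):
    if obj.get('t') == 'set':
        return set(obj['o'])
    return obj

packed = msgpack.packb({'vals': {1, 2, 3}}, default=encode_set)
print(msgpack.unpackb(packed, object_hook=decode_set))  # {'vals': {1, 2, 3}}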
Example #9
 def retrieve_oob(cls, dataset_id='', query=None, delivery_format=''):
     query = query or {}
     coverage = None
     try:
         coverage = cls._get_coverage(dataset_id)
         if coverage is None:
             raise BadRequest('no such coverage')
         if isinstance(coverage, SimplexCoverage) and coverage.is_empty():
             log.info('Reading from an empty coverage')
             rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
         else:
             args = {
                 'start_time'     : query.get('start_time', None),
                 'end_time'       : query.get('end_time', None),
                 'stride_time'    : query.get('stride_time', None),
                 'parameters'     : query.get('parameters', None),
                 'stream_def_id'  : delivery_format,
                 'tdoa'           : query.get('tdoa', None),
                 'sort_parameter' : query.get('sort_parameter', None)
             }
             rdt = ReplayProcess._cov2granule(coverage=coverage, **args)
     except Exception as e:
         cls._eject_cache(dataset_id)
         data_products, _ = Container.instance.resource_registry.find_subjects(object=dataset_id, predicate=PRED.hasDataset, subject_type=RT.DataProduct)
         for data_product in data_products:
             log.error("Data Product %s (%s) had issues reading from the coverage model\nretrieve_oob(dataset_id='%s', query=%s, delivery_format=%s)", data_product.name, data_product._id, dataset_id, query, delivery_format)
         log.error("Problems reading from the coverage", exc_info=True)
         raise BadRequest('Problems reading from the coverage')
     return rdt.to_granule()
Example #10
def build_error_response(e):

    if hasattr(e, 'get_stacks'):
        #Process potentially multiple stacks.
        full_error = ''
        for i in range(len(e.get_stacks())):
            full_error += e.get_stacks()[i][0] + "\n"
            if i == 0:
                full_error += "".join(traceback.format_exception(*sys.exc_info()))
            else:
                for ln in e.get_stacks()[i][1]:
                    full_error += str(ln)  + "\n"

        exec_name = e.__class__.__name__
    else:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        exec_name = exc_type.__name__
        # join the list into one string, matching the get_stacks branch above
        full_error = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))

    if service_gateway_instance.log_errors:
        log.error(full_error)

    result = {
        GATEWAY_ERROR_EXCEPTION : exec_name,
        GATEWAY_ERROR_MESSAGE : str(e.message),
        GATEWAY_ERROR_TRACE : full_error
    }

    if RETURN_FORMAT_PARAM in request.args:
        return_format = convert_unicode(request.args[RETURN_FORMAT_PARAM])
        if return_format == RETURN_FORMAT_RAW_JSON:
            return service_gateway_app.response_class(result, mimetype='application/json')

    return json_response({'data': {GATEWAY_ERROR: result }} )
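The else branch relies on the standard traceback module; joining format_exception's list of strings gives the familiar multi-line trace. A self-contained sketch:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_obj, exc_tb = sys.exc_info()
    full_error = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))
    print(full_error)  # "Traceback (most recent call last): ... ZeroDivisionError: ..."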
Example #11
    def _read_by_path(self, path, orgname=None, mult_keys=None):
        """
        Given a qualified path, find entry in directory and return DirEntry object or None if not found.
        """
        if path is None:
            raise BadRequest("Illegal arguments")
        orgname = orgname or self.orgname
        if mult_keys:
            parent = path or "/"
            key = mult_keys
        else:
            parent, key = path.rsplit("/", 1)
            parent = parent or "/"
        find_key = [orgname, key, parent]
        view_res = self.dir_store.find_by_view('directory', 'by_key', key=find_key, id_only=True, convert_doc=True)

        match = [doc for docid, index, doc in view_res]
        if mult_keys:
            entries_by_key = {doc.key: doc for doc in match}
            entries = [entries_by_key.get(key, None) for key in mult_keys]
            return entries
        else:
            if len(match) > 1:
                log.error("More than one directory entry found for key %s" % path)
                return match[0]
            elif match:
                return match[0]
            return None
Example #12
    def process_local_range_test(self, coverage, parameter, input_name, datlim,
                                 datlimz, dims):
        return  # Not ready
        qc_array = self.get_parameter_values(coverage, parameter.name)
        indexes = np.where(qc_array == -88)[0]

        from ion_functions.qc.qc_functions import dataqc_localrangetest_wrapper
        # dat
        value_array = self.get_parameter_values(coverage, input_name)[indexes]
        time_array = self.get_parameter_values(
            coverage, coverage.temporal_parameter_name)[indexes]

        # datlim is an argument and comes from the lookup table
        # datlimz is an argument and comes from the lookup table
        # dims is an argument and is created using the column headings
        # pval_callback, well as for that...
        # TODO: slice_ is the window of the site data product, but for
        # now we'll just use a global slice
        slice_ = slice(None)

        def parameter_callback(param_name):
            return coverage.get_parameter_values(param_name, slice_)

        qc_array = dataqc_localrangetest_wrapper(value_array, datlim, datlimz,
                                                 dims, parameter_callback)
        # value_array/time_array were already restricted to `indexes` above,
        # and the wrapper output is aligned with them, so no re-indexing here.
        return_dictionary = {
            coverage.temporal_parameter_name: time_array,
            parameter.name: qc_array
        }
        log.error("Here's what it would look like\n%s", return_dictionary)
Example #13
    def get_resource_commitments(self, actor_id, resource_id):

        log.debug("Finding commitments for actor_id: %s and resource_id: %s" %
                  (actor_id, resource_id))

        try:

            commitments, _ = self._rr.find_objects(resource_id,
                                                   PRED.hasCommitment,
                                                   RT.Commitment)
            if not commitments:
                return None

            cur_time = int(get_ion_ts())
            commitment_list = []
            for com in commitments:  #TODO - update when Retired is removed from find_objects
                if com.consumer == actor_id and com.lcstate != LCS.RETIRED and ( com.expiration == 0 or \
                ( com.expiration > 0 and cur_time < com.expiration)):
                    commitment_list.append(com)

            if commitment_list:
                return commitment_list

        except Exception as e:
            log.error(e)
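The expiration test above compares int(get_ion_ts()) with com.expiration, which suggests ION timestamps are epoch milliseconds carried as strings, with zero meaning "never expires". A stand-in sketch under that assumption:

import time

def get_ion_ts():
    # assumed behavior: current time as a string of epoch milliseconds
    return str(int(time.time() * 1000))

cur_time = int(get_ion_ts())
expiration = cur_time + 60000  # commitment valid for another minute
print(expiration == 0 or cur_time < expiration)  # True -> still valid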
Example #14
def process_oms_event():

    json_params = {}

    # oms direct request
    if request.data:
        json_params = simplejson.loads(str(request.data))
        log.debug('ServiceGatewayService:process_oms_event request.data:  %s',
                  json_params)

    #validate payload
    if 'platform_id' not in json_params or 'message' not in json_params:
        log.warning('Invalid OMS event format. payload_data: %s', json_params)
        #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    #prepare the event information
    try:
        #create a publisher to relay OMS events into the system as DeviceEvents
        event_publisher = EventPublisher()

        event_publisher.publish_event(
            event_type='OMSDeviceStatusEvent',
            origin_type='OMS Platform',
            origin=json_params.get('platform_id', 'NOT PROVIDED'),
            sub_type='',
            description=json_params.get('message', ''),
            status_details=json_params)
    except Exception as e:
        log.error('Could not publish OMS event: %s. Event data: %s',
                  e.message, json_params)
Example #15
    def _coverage_to_granule(cls, coverage, start_time=None, end_time=None, stride_time=None, fuzzy_stride=True, parameters=None, stream_def_id=None, tdoa=None):
        slice_ = slice(None) # Defaults to all values


        # Validations
        if start_time is not None:
            validate_is_instance(start_time, Number, 'start_time must be a number for striding.')
        if end_time is not None:
            validate_is_instance(end_time, Number, 'end_time must be a number for striding.')
        if stride_time is not None:
            validate_is_instance(stride_time, Number, 'stride_time must be a number for striding.')

        if tdoa is not None and isinstance(tdoa,slice):
            slice_ = tdoa
        
        elif stride_time is not None and not fuzzy_stride: # SLOW 
            ugly_range = np.arange(start_time, end_time, stride_time)
            idx_values = [cls.get_time_idx(coverage,i) for i in ugly_range]
            idx_values = list(set(idx_values)) # Removing duplicates - also mixes the order of the list!!!
            idx_values.sort()
            slice_ = [idx_values]


        elif not (start_time is None and end_time is None):
            if start_time is not None:
                start_time = cls.get_time_idx(coverage,start_time)
            if end_time is not None:
                end_time = cls.get_time_idx(coverage,end_time)

            slice_ = slice(start_time,end_time,stride_time)
            log.info('Slice: %s', slice_)

        if stream_def_id:
            rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        else:
            rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
        if parameters is not None:
            # TODO: Improve efficiency here
            fields = list(set(parameters).intersection(rdt.fields))
        else:
            fields = rdt.fields

        for field in fields:
            log.info( 'Slice is %s' , slice_)
            n = coverage.get_parameter_values(field,tdoa=slice_)
            if n is None:
                rdt[field] = [n]
            elif isinstance(n,np.ndarray):
                if coverage.get_data_extents(field)[0] < coverage.num_timesteps:
                    log.error("Misformed coverage detected, padding with fill_value")
                    arr_len = utils.slice_shape(slice_, (coverage.num_timesteps,))[0]
                    fill_arr = np.empty(arr_len - n.shape[0] , dtype=n.dtype)
                    fill_arr.fill(coverage.get_parameter_context(field).fill_value)
                    n = np.append(n,fill_arr)
                elif coverage.get_data_extents(field)[0] > coverage.num_timesteps:
                    raise CorruptionError('The coverage is corrupted:\n\tfield: %s\n\textents: %s\n\ttimesteps: %s' % (field, coverage.get_data_extents(field), coverage.num_timesteps))
                rdt[field] = np.atleast_1d(n)
            else:
                rdt[field] = [n]
        return rdt
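The non-fuzzy striding branch maps each requested time to an index and then deduplicates; as the comment warns, set() scrambles order, so the sort afterwards is load-bearing. The same steps in isolation:

idx_values = [0, 3, 3, 7, 7, 9]     # duplicate indexes from nearby times
idx_values = list(set(idx_values))  # dedupe, but order is now arbitrary
idx_values.sort()
print(idx_values)                   # [0, 3, 7, 9]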
Example #16
def shutdown_or_die(delay_sec=0):
    """
    Wait the given number of seconds and forcibly kill this OS process if it's still running.
    """

    def diediedie(*args):
        pid = os.getpid()
        log.warn("Container shutdown timeout. Send KILL signal (pid %d).", pid)
        os.kill(pid, signal.SIGKILL)

    def dontdie():
        signal.alarm(0)

    if delay_sec > 0:
        try:
            #old = signal.signal(signal.SIGALRM, diediedie)
            signal.alarm(int(delay_sec))

            #if old:
            #    log.warn("shutdown_or_die found a previously registered ALARM and overrode it.")
        except ValueError as ex:
            log.error("Failed to set failsafe shutdown signal. This only works on UNIX platforms.")
    else:
        diediedie()

    return dontdie
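The failsafe is plain signal.alarm: arm it before shutdown and disarm it (alarm(0), as dontdie does) once shutdown finishes. A runnable sketch on UNIX; the 2-second delay is arbitrary:

import os
import signal
import time

def diediedie(*args):
    os.kill(os.getpid(), signal.SIGKILL)

signal.signal(signal.SIGALRM, diediedie)  # SIGALRM exists only on UNIX
signal.alarm(2)   # failsafe fires in 2 seconds
time.sleep(1)     # ...orderly shutdown work happens here...
signal.alarm(0)   # shutdown finished in time: disarm
print("clean shutdown, alarm cancelled")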
Example #17
    def launch(self):
        """
        Launches the simulator process as indicated by _COMMAND.

        @return (rsn_oms, uri) A pair with the CIOMSSimulator instance and the
                associated URI to establish connection with it.
        """
        log.debug("[OMSim] Launching: %s", _COMMAND)

        self._process = self._spawn(_COMMAND)

        if not self._process or not self.poll():
            msg = "[OMSim] Failed to launch simulator: %s" % _COMMAND
            log.error(msg)
            raise Exception(msg)

        log.debug("[OMSim] process started, pid: %s", self.getpid())

        # give it some time to start up
        sleep(5)

        # get URI:
        uri = None
        with open("logs/rsn_oms_simulator.yml", buffering=1) as f:
            # we expect one of the first few lines to be of the form:
            # rsn_oms_simulator_uri=xxxx
            # where xxxx is the uri -- see oms_simulator_server.
            while uri is None:
                line = f.readline()
                if line.startswith("rsn_oms_simulator_uri="):
                    uri = line[len("rsn_oms_simulator_uri="):].strip()

        self._rsn_oms = CIOMSClientFactory.create_instance(uri)
        return self._rsn_oms, uri
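The URI scan above reads the simulator's log until a rsn_oms_simulator_uri= line appears. The same pattern in a self-contained form, with StringIO standing in for the log file and an end-of-file guard added so a missing line cannot loop forever:

from io import StringIO

f = StringIO(u"starting simulator\nrsn_oms_simulator_uri=http://localhost:7700/\n")
uri = None
while uri is None:
    line = f.readline()
    if not line:  # EOF: the expected line never appeared
        break
    if line.startswith("rsn_oms_simulator_uri="):
        uri = line[len("rsn_oms_simulator_uri="):].strip()
print(uri)  # http://localhost:7700/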
Example #18
    def create_doc_mult(self, docs, object_ids=None, allow_ids=False):
        if not allow_ids:
            if any(["_id" in doc for doc in docs]):
                raise BadRequest("Docs must not have '_id'")
            if any(["_rev" in doc for doc in docs]):
                raise BadRequest("Docs must not have '_rev'")
        if object_ids and len(object_ids) != len(docs):
            raise BadRequest("Invalid object_ids")
        if type(docs) is not list:
            raise BadRequest("Invalid type for docs:%s" % type(docs))

        if object_ids:
            for doc, oid in zip(docs, object_ids):
                doc["_id"] = oid
        else:
            for doc in docs:
                doc["_id"] = doc.get("_id", None) or uuid4().hex

        # Update docs.  CouchDB will assign versions to docs.
        db,_ = self._get_datastore()
        res = db.update(docs)
        self._count(create_mult_call=1, create_mult_obj=len(docs))
        if not all([success for success, oid, rev in res]):
            errors = ["%s:%s" % (oid, rev) for success, oid, rev in res if not success]
            log.error('create_doc_mult had errors. Successful: %s, Errors: %s' % (len(res) - len(errors), "\n".join(errors)))
        else:
            log.debug('create_doc_mult result: %s', str(res))
        return res
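The id-assignment step keeps an existing _id and otherwise generates one; uuid4().hex yields a 32-character hex string, a CouchDB-friendly document id. In isolation:

from uuid import uuid4

docs = [{"a": 1}, {"_id": "fixed-id", "a": 2}]
for doc in docs:
    doc["_id"] = doc.get("_id", None) or uuid4().hex
print([d["_id"] for d in docs])  # ['<32 hex chars>', 'fixed-id']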
Example #19
 def map_cov_rdt(cls, coverage, rdt, field, slice_):
     log.trace('Slice is %s', slice_)
     try:
         n = coverage.get_parameter_values(field, tdoa=slice_)
     except ParameterFunctionException:
         return
     if n is None:
         rdt[field] = [n]
     elif isinstance(n, np.ndarray):
         if coverage.get_data_extents(field)[0] < coverage.num_timesteps:
             log.error(
                 "Misformed coverage detected, padding with fill_value")
             arr_len = utils.slice_shape(slice_,
                                         (coverage.num_timesteps, ))[0]
             fill_arr = np.empty(arr_len - n.shape[0], dtype=n.dtype)
             fill_arr.fill(coverage.get_parameter_context(field).fill_value)
             n = np.append(n, fill_arr)
         elif coverage.get_data_extents(field)[0] > coverage.num_timesteps:
             raise CorruptionError(
                 'The coverage is corrupted:\n\tfield: %s\n\textents: %s\n\ttimesteps: %s'
                 % (field, coverage.get_data_extents(field),
                    coverage.num_timesteps))
         rdt[field] = np.atleast_1d(n)
     else:
         rdt[field] = [n]
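The padding branch grows a too-short array to the expected length with the parameter's fill value. A numpy-only sketch with a made-up fill value of -9999.0:

import numpy as np

n = np.array([1.0, 2.0, 3.0])
expected_len = 5  # e.g. coverage.num_timesteps
fill_arr = np.empty(expected_len - n.shape[0], dtype=n.dtype)
fill_arr.fill(-9999.0)  # stand-in for fill_value
n = np.append(n, fill_arr)
print(n)  # [1. 2. 3. -9999. -9999.]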
Example #20
    def _spawned_proc_failed(self, gproc):
        log.error("ProcManager._spawned_proc_failed: %s, %s", gproc,
                  gproc.exception)

        # for now - don't worry about the mapping, if we get a failure, just kill the container.
        # leave the mapping in place for potential expansion later.

        #        # look it up in mapping
        #        if not gproc in self._spawned_proc_to_process:
        #            log.warn("No record of gproc %s in our map (%s)", gproc, self._spawned_proc_to_process)
        #            return
        #
        prc = self._spawned_proc_to_process.get(gproc, None)
        #
        #        # make sure prc is in our list
        #        if not prc in self.procs.values():
        #            log.warn("prc %s not found in procs list", prc)
        #            return

        # stop the rest of the process
        if prc is not None:
            try:
                self.terminate_process(prc.id, False)
            except Exception as e:
                log.warn(
                    "Problem while stopping rest of failed process %s: %s",
                    prc, e)
            finally:
                self._call_proc_state_changed(prc, ProcessStateEnum.FAILED)
        else:
            log.warn("No ION process found for failed proc manager child: %s",
                     gproc)
Example #21
def build_error_response(e):

    if hasattr(e, "get_stacks"):
        # Process potentially multiple stacks.
        full_error = ""
        for i in range(len(e.get_stacks())):
            full_error += e.get_stacks()[i][0] + "\n"
            if i == 0:
                full_error += "".join(traceback.format_exception(*sys.exc_info()))
            else:
                for ln in e.get_stacks()[i][1]:
                    full_error += str(ln) + "\n"

        exec_name = e.__class__.__name__
    else:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        exec_name = exc_type.__name__
        # join the list into one string, matching the get_stacks branch above
        full_error = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))

    if service_gateway_instance.log_errors:
        log.error(full_error)

    result = {
        GATEWAY_ERROR_EXCEPTION: exec_name,
        GATEWAY_ERROR_MESSAGE: str(e.message),
        GATEWAY_ERROR_TRACE: full_error,
    }

    if RETURN_MIMETYPE_PARAM in request.args:
        return_mimetype = str(request.args[RETURN_MIMETYPE_PARAM])
        return service_gateway_app.response_class(result, mimetype=return_mimetype)

    return json_response({"data": {GATEWAY_ERROR: result}})
Example #22
 def start_communication(self):
     log.debug('start_communication')
     try:
         self.connection = self._start_connection()
         self.connection.ioloop.start()
     except Exception as e:
         log.error('exception in communication thread: %s', str(e), exc_info=True)
Example #23
def get_valid_resource_commitments(resource_id=None, actor_id=None):
    '''
    Returns the list of valid commitments for the specified resource.
    If the optional actor_id is supplied, the list is filtered by that actor.
    @param resource_id:
    @param actor_id:
    @return:
    '''
    log.debug("Finding commitments for resource_id: %s and actor_id: %s" % (resource_id, actor_id))

    if resource_id is None:
        return None

    try:
        gov_controller = bootstrap.container_instance.governance_controller
        commitments,_ = gov_controller.rr.find_objects(resource_id, PRED.hasCommitment, RT.Commitment)
        if not commitments:
            return None

        cur_time = int(get_ion_ts())
        commitment_list = []
        for com in commitments:
        if (actor_id is None or com.consumer == actor_id) and (int(com.expiration) == 0 or (int(com.expiration) > 0 and cur_time < int(com.expiration))):
                commitment_list.append(com)

        if commitment_list:
            return commitment_list

    except Exception as e:
        log.error(e)
Example #24
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # Coordinates the container start
        self._status = INIT

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self.start_time = get_ion_ts()

        bootstrap.container_instance = self
        Container.instance = self
        self.container = self  # Make self appear as process to service clients
        self.CCAP = CCAP
        self.CFG = CFG

        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Greenlet context-local storage
        self.context = LocalContextMixin()

        # Load general capabilities file and augment with specific profile
        self._load_capabilities()

        # Start the capabilities
        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_definitions:
                raise ContainerError(
                    "CC capability %s not defined in profile" % cap)
            if cap in self._capabilities or cap in self._cap_instances:
                raise ContainerError("CC capability %s already initialized" %
                                     cap)
            try:
                cap_def = self._cap_definitions[cap]
                log.debug("__init__(): Initializing '%s'" % cap)
                cap_obj = named_any(cap_def['class'])(container=self)
                self._cap_instances[cap] = cap_obj
                if 'depends_on' in cap_def and cap_def['depends_on']:
                    dep_list = cap_def['depends_on'].split(',')
                    for dep in dep_list:
                        dep = dep.strip()
                        if dep not in self._cap_initialized:
                            raise ContainerError(
                                "CC capability %s dependent on non-existing capability %s"
                                % (cap, dep))
                if 'field' in cap_def and cap_def['field']:
                    setattr(self, cap_def['field'], cap_obj)
                self._cap_initialized.append(cap)
            except Exception as ex:
                log.error("Container Capability %s init error: %s" % (cap, ex))
                raise

        log.debug("Container initialized, OK.")
Example #25
    def is_resource_acquired_exclusively(self, actor_id='', resource_id=''):
        """Returns True if the specified resource_id has been acquired exclusively. The actor_id is optional, as the operation can
        return True if the resource is acquired exclusively by any actor or specifically by the specified actor_id,
        otherwise False is returned.

        @param actor_id    str
        @param resource_id    str
        @retval success    bool
        @throws BadRequest    if resource_id is not specified
        """
        if not resource_id:
            raise BadRequest("The resource_id parameter is missing")

        try:
            cur_time = int(get_ion_ts())
            commitments,_ = self.clients.resource_registry.find_objects(resource_id,PRED.hasCommitment, RT.Commitment)
            if commitments:
                for com in commitments:
                    if com.lcstate == LCS.RETIRED: #TODO remove when RR find_objects does not include retired objects
                        continue

                    #If the expiration is not 0 make sure it has not expired
                    if ( actor_id is None or actor_id == com.consumer )  and com.commitment.exclusive and\
                       int(com.expiration) > 0 and cur_time < int(com.expiration):
                        return True

        except Exception as e:
            log.error('is_resource_acquired_exclusively: %s for actor_id:%s and resource_id:%s' %  (e.message, actor_id, resource_id))
Example #26
 def target(self):
     try:
         while not self._shutting_down:
             self.send_heartbeats()
             self._shutdown_event.wait(timeout=self.heartbeat_secs)
     except:
         log.error('thread died', exc_info=True)
Example #27
def get_resource_commitments(actor_id, resource_id):
    '''
    Returns the list of commitments for the specified user and resource
    @param actor_id:
    @param resource_id:
    @return:
    '''
    log.debug("Finding commitments for actor_id: %s and resource_id: %s" % (actor_id, resource_id))

    try:
        gov_controller = bootstrap.container_instance.governance_controller
        commitments,_ = gov_controller.rr.find_objects(resource_id, PRED.hasCommitment, RT.Commitment)
        if not commitments:
            return None

        cur_time = int(get_ion_ts())
        commitment_list = []
        for com in commitments:  #TODO - update when Retired is removed from find_objects
            if com.consumer == actor_id and com.lcstate != LCS.RETIRED and ( com.expiration == 0 or\
                                                                             ( com.expiration > 0 and cur_time < com.expiration)):
                commitment_list.append(com)

        if commitment_list:
            return commitment_list

    except Exception as e:
        log.error(e)
Example #28
def build_error_response(e):

    if hasattr(e, 'get_stacks'):
        #Process potentially multiple stacks.
        full_error = ''
        for i in range(len(e.get_stacks())):
            full_error += e.get_stacks()[i][0] + "\n"
            if i == 0:
                full_error += "".join(traceback.format_exception(*sys.exc_info()))
            else:
                for ln in e.get_stacks()[i][1]:
                    full_error += str(ln) + "\n"

        exec_name = e.__class__.__name__
    else:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        exec_name = exc_type.__name__
        # join the list into one string, matching the get_stacks branch above
        full_error = "".join(traceback.format_exception(exc_type, exc_obj, exc_tb))

    if service_gateway_instance.log_errors:
        log.error(full_error)

    result = {
        GATEWAY_ERROR_EXCEPTION: exec_name,
        GATEWAY_ERROR_MESSAGE: str(e.message),
        GATEWAY_ERROR_TRACE: full_error
    }

    if RETURN_MIMETYPE_PARAM in request.args:
        return_mimetype = str(request.args[RETURN_MIMETYPE_PARAM])
        return service_gateway_app.response_class(result,
                                                  mimetype=return_mimetype)

    return json_response({'data': {GATEWAY_ERROR: result}})
Example #29
    def _spawned_proc_failed(self, gproc):
        log.error("ProcManager._spawned_proc_failed: %s, %s", gproc, gproc.exception)

        # for now - don't worry about the mapping, if we get a failure, just kill the container.
        # leave the mapping in place for potential expansion later.

#        # look it up in mapping
#        if not gproc in self._spawned_proc_to_process:
#            log.warn("No record of gproc %s in our map (%s)", gproc, self._spawned_proc_to_process)
#            return
#
        prc = self._spawned_proc_to_process.get(gproc, None)
#
#        # make sure prc is in our list
#        if not prc in self.procs.values():
#            log.warn("prc %s not found in procs list", prc)
#            return

        # stop the rest of the process
        if prc is not None:
            try:
                self.terminate_process(prc.id, False)
            except Exception as e:
                log.warn("Problem while stopping rest of failed process %s: %s", prc, e)
            finally:
                self._call_proc_state_changed(prc, ProcessStateEnum.FAILED)
        else:
            log.warn("No ION process found for failed proc manager child: %s", gproc)

        #self.container.fail_fast("Container process (%s) failed: %s" % (svc, gproc.exception))

        # Stop the container if this was the last process
        if not self.procs and CFG.get_safe("container.processes.exit_once_empty", False):
            self.container.fail_fast("Terminating container after last process (%s) failed: %s" % (gproc, gproc.exception))
Example #30
    def __init__(self, host=None, port=None, datastore_name='prototype', options="", profile=DataStore.DS_PROFILE.BASIC):
        log.debug('__init__(host=%s, port=%s, datastore_name=%s, options=%s)', host, port, datastore_name, options)
        self.host = host or CFG.server.couchdb.host
        self.port = port or CFG.server.couchdb.port
        # The scoped name of the datastore
        self.datastore_name = datastore_name
        self.auth_str = ""
        try:
            if CFG.server.couchdb.username and CFG.server.couchdb.password:
                self.auth_str = "%s:%s@" % (CFG.server.couchdb.username, CFG.server.couchdb.password)
                log.debug("Using username:password authentication to connect to datastore")
        except AttributeError:
            log.error("CouchDB username:password not configured correctly. Trying anonymous...")

        connection_str = "http://%s%s:%s" % (self.auth_str, self.host, self.port)
        #connection_str = "http://%s:%s" % (self.host, self.port)
        # TODO: Security risk to emit password into log. Remove later.
        log.info('Connecting to CouchDB server: %s' % connection_str)
        self.server = couchdb.Server(connection_str)

        # Datastore specialization (views)
        self.profile = profile

        # serializers
        self._io_serializer     = IonObjectSerializer()
        # TODO: Not nice to have this class depend on ION objects
        self._io_deserializer   = IonObjectDeserializer(obj_registry=get_obj_registry())
        self._datastore_cache = {}
Example #31
    def _new_transport(self, ch_number=None):
        """
        Creates a new AMQPTransport with an underlying Pika channel.
        """
        amq_chan = blocking_cb(self.client.channel,
                               'on_open_callback',
                               channel_number=ch_number)
        if amq_chan is None:
            log.error(
                "AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s",
                ch_number)
            from pyon.container.cc import Container
            if Container.instance is not None:
                Container.instance.fail_fast(
                    "AMQCHAN IS NONE, messaging has failed", True)
            raise StandardError(
                "AMQCHAN IS NONE THIS SHOULD NEVER HAPPEN, chan number requested: %s"
                % ch_number)

        transport = AMQPTransport(amq_chan)

        # return the pending in collection (lets this number be assigned again later)
        self.client._pending.remove(transport.channel_number)

        # by default, everything should have a prefetch count of 1 (configurable)
        # this can be overridden by the channel get_n related methods
        transport.qos_impl(prefetch_count=CFG.get_safe(
            'container.messaging.endpoint.prefetch_count', 1))

        return transport
Example #32
    def process_local_range_test(self, coverage, parameter, input_name, datlim, datlimz, dims):
        qc_array = coverage.get_parameter_values(parameter.name)
        indexes = np.where(qc_array == -88)[0]

        from ion_functions.qc.qc_functions import dataqc_localrangetest_wrapper
        # dat
        value_array = coverage.get_parameter_values(input_name)
        time_array = coverage.get_parameter_values(coverage.temporal_parameter_name)

        # datlim is an argument and comes from the lookup table
        # datlimz is an argument and comes from the lookup table
        # dims is an argument and is created using the column headings
        # pval_callback, well as for that...
        # TODO: slice_ is the window of the site data product, but for 
        # now we'll just use a global slice
        slice_ = slice(None)
        def parameter_callback(param_name):
            return coverage.get_parameter_values(param_name, slice_)


        qc_array = dataqc_localrangetest_wrapper(value_array, datlim, datlimz, dims, parameter_callback)
        return_dictionary = {
                coverage.temporal_parameter_name : time_array[indexes],
                parameter.name : qc_array[indexes]
        }
        log.error("Here's what it would look like\n%s", return_dictionary)
Example #33
    def testDelete(self):
        docs = [{
            'a': '1',
            'b': '2',
            '_id': '1'
        }, {
            'a': '1',
            'b': '2',
            '_id': '2'
        }, {
            'a': '1',
            'b': '2',
            '_id': '3'
        }, {
            'a': '1',
            'b': '2',
            '_id': '4'
        }, {
            'a': '1',
            'b': '2',
            '_id': '5'
        }]
        self.subject.insert(docs)

        self.subject.delete(['2', '3', '4'])
        try:
            succ, id, doc = self.subject.read('3')
        except Exception as e:
            log.error('throws', exc_info=True)
            self.fail('should return, not raise exception %s' % str(e))
Example #34
    def __init__(self, host=None, port=None, datastore_name='prototype', options="", profile=DataStore.DS_PROFILE.BASIC):
        log.debug('__init__(host=%s, port=%s, datastore_name=%s, options=%s)' % (host, port, datastore_name, options))
        self.host = host or CFG.server.couchdb.host
        self.port = port or CFG.server.couchdb.port
        # The scoped name of the datastore
        self.datastore_name = datastore_name
        self.auth_str = ""
        try:
            if CFG.server.couchdb.username and CFG.server.couchdb.password:
                self.auth_str = "%s:%s@" % (CFG.server.couchdb.username, CFG.server.couchdb.password)
                log.debug("Using username:password authentication to connect to datastore")
        except AttributeError:
            log.error("CouchDB username:password not configured correctly. Trying anonymous...")

        connection_str = "http://%s%s:%s" % (self.auth_str, self.host, self.port)
        #connection_str = "http://%s:%s" % (self.host, self.port)
        # TODO: Security risk to emit password into log. Remove later.
        log.info('Connecting to CouchDB server: %s' % connection_str)
        self.server = couchdb.Server(connection_str)

        # Datastore specialization
        self.profile = profile

        # serializers
        self._io_serializer     = IonObjectSerializer()
        self._io_deserializer   = IonObjectDeserializer(obj_registry=obj_registry)
Example #35
def process_oms_event():

    json_params = {}

    # oms direct request
    if request.data:
        json_params = json_loads(str(request.data))
        log.debug('ServiceGatewayService:process_oms_event request.data:  %s', json_params)

    #validate payload
    if 'platform_id' not in json_params or 'message' not in json_params:
        log.warning('Invalid OMS event format. payload_data: %s', json_params)
        #return gateway_json_response(OMS_BAD_REQUEST_RESPONSE)

    #prepare the event information
    try:
        #create a publisher to relay OMS events into the system as DeviceEvents
        event_publisher = EventPublisher()

        event_publisher.publish_event(
            event_type='OMSDeviceStatusEvent',
            origin_type='OMS Platform',
            origin=json_params.get('platform_id', 'NOT PROVIDED'),
            sub_type='',
            description=json_params.get('message', ''),
            status_details=json_params)
    except Exception as e:
        log.error('Could not publish OMS event: %s. Event data: %s', e.message, json_params)
Example #36
    def create_doc_mult(self, docs, object_ids=None, allow_ids=False):
        if not allow_ids:
            if any(["_id" in doc for doc in docs]):
                raise BadRequest("Docs must not have '_id'")
            if any(["_rev" in doc for doc in docs]):
                raise BadRequest("Docs must not have '_rev'")
        if object_ids and len(object_ids) != len(docs):
            raise BadRequest("Invalid object_ids")
        if type(docs) is not list:
            raise BadRequest("Invalid type for docs:%s" % type(docs))

        if object_ids:
            for doc, oid in zip(docs, object_ids):
                doc["_id"] = oid
        else:
            for doc in docs:
                doc["_id"] = doc.get("_id", None) or uuid4().hex

        # Update docs.  CouchDB will assign versions to docs.
        db,_ = self._get_datastore()
        res = db.update(docs)
        if not all([success for success, oid, rev in res]):
            errors = ["%s:%s" % (oid, rev) for success, oid, rev in res if not success]
            log.error('create_doc_mult had errors. Successful: %s, Errors: %s' % (len(res) - len(errors), "\n".join(errors)))
        else:
            log.debug('create_doc_mult result: %s', str(res))
        return res
Example #37
    def outgoing(self, invocation):
        payload = invocation.message

        # Compliance: Make sure sent message objects support DotDict as arguments.
        # Although DotDict is subclass of dict, msgpack does not like it
        if isinstance(payload, IonMessageObjectBase):
            for k, v in payload.__dict__.iteritems():
                if isinstance(v, DotDict):
                    setattr(payload, k, v.as_dict())

        # Msgpack the content to binary str - does nested IonObject encoding
        try:
            invocation.message = msgpack.packb(payload, default=encode_ion)
        except Exception:
            log.error("Illegal type in IonObject attributes: %s", payload)
            raise BadRequest("Illegal type in IonObject attributes")

        # Make sure no Nones exist in headers - this indicates a problem somewhere up the stack.
        # pika will choke hard on them as well, masking the actual problem, so we catch here.
        nonelist = [(k, v) for k, v in invocation.headers.iteritems() if v is None]
        if nonelist:
            raise BadRequest("Invalid headers containing None values: %s" % str(nonelist))

        msg_size = len(invocation.message)
        if msg_size > self.max_message_size:
            raise BadRequest('The message size %s is larger than the max_message_size value of %s' % (
                msg_size, self.max_message_size))

        return invocation
Example #38
 def update_user_profile(self, user_ooi_id='', name='', institution='', email_address='', profile=[OrderedDict([('name', ''), ('value', '')])]):
     try:
         user_info = self.clients.identity_registry.update_user(name=name, email=email_address, variables=profile)
         log.debug("User updated")
         return user_info
     except:
         # TODO throw not found exception
         log.error("Find failed")
Example #39
 def event_processing_loop(self):
     '''
     Processes the events in the event queue
     '''
     log.error("Processing event queue")
     self.event_queue.put(StopIteration)
     for event in self.event_queue:
         log.error("My event's reference designator: %s", event.origin)
Example #40
 def fail_fast(self, err_msg=""):
     """
     Container needs to shut down and NOW.
     """
     log.error("Fail Fast: %s", err_msg)
     self.stop()
     log.error("Fail Fast: killing container")
     os.kill(os.getpid(), signal.SIGTERM)
Example #41
 def register_dap_dataset(self, dataset_id, data_product_name=''):
     coverage_path = DatasetManagementService._get_coverage_path(dataset_id)
     try:
         self.add_dataset_to_xml(coverage_path=coverage_path, product_name=data_product_name)
         self.create_symlink(coverage_path, self.pydap_data_path)
     except: # We don't re-raise to prevent clients from bombing out...
         log.exception('Problem registering dataset')
         log.error('Failed to register dataset for coverage path %s' % coverage_path)
Example #42
 def test_verify_importer_service_online(self):
     try:
         r = requests.get(self.importer_service_url)
         self.assertTrue(r.status_code == 200)
     except Exception as e:
         #make it fail
         log.error("check service is started on port...%s", e)
         self.assertTrue(False)
Example #43
 def test_verify_importer_service_online(self):
     try:
         r = requests.get(self.importer_service_url)
         self.assertTrue(r.status_code == 200)
     except Exception as e:
         #make it fail
         log.error("check service is started on port...%s", e)
         self.assertTrue(False)
Example #44
 def get_user(self, user_ooi_id=""):
     try:
         user_info = self.clients.identity_registry.find_user_by_id(user_ooi_id)
         log.debug("User found")
         return user_info
     except:
         # TODO throw not found exception
         log.error("Find failed")
Example #45
 def event_processing_loop(self):
     '''
     Processes the events in the event queue
     '''
     log.error("Processing event queue")
     self.event_queue.put(StopIteration)
     for event in self.event_queue:
         log.error("My event's reference designator: %s", event.origin)
Example #46
 def event_processing_loop(self):
     '''
     Processes the events in the event queue
     '''
     log.error("Processing event queue")
     self.event_queue.put(StopIteration)
     for event in self.event_queue:
         log.error("My event: %s", event)
Example #47
    def _privileged_transport_closed(self, name, transport, code, text):
        """
        Callback for when the privileged transport is closed.

        If it's an error close, this is bad and will fail fast the container.
        """
        if not (code == 0 or code == 200):
            log.error("The privileged transport has failed (%s: %s)", code, text)
            self.container.fail_fast("ExManager privileged transport (broker %s) has failed (%s: %s)" % (name, code, text), True)
Example #48
 def get_user(self, user_ooi_id=''):
     try:
         user_info = self.clients.identity_registry.find_user_by_id(
             user_ooi_id)
         log.debug("User found")
         return user_info
     except:
         # TODO throw not found exception
         log.error("Find failed")
Example #49
    def _child_failed(self, gproc):
        """
        Handler method for when any child worker thread dies with error.
        Aborts the "ioloop" greenlet.
        """
        log.error("Child (%s) failed with an exception: %s", gproc, gproc.exception)

        if self.gl_ioloop:
            self.gl_ioloop.kill(exception=gproc.exception, block=False)
Example #50
    def register_user(self, certificate="", rsa_private_key=""):
        '''
        This op is overloaded.  If the user has never been seen before,
        we attempt to create an OOI user and assign the subject from
        the certificate to the OOI user profile.  The user is then
        "logged in".
        If the user has been seen before, we just "log in" the user.
        '''

        # If default or not all parameters are provided, short circuit and
        # return anonymous user id
        if certificate == "" or rsa_private_key == "":
            return {
                "ooi_id": "ANONYMOUS",
                "user_is_admin": False,
                "user_already_registered": True,
                "user_is_early_adopter": False,
                "user_is_data_provider": False,
                "user_is_marine_operator": False
            }
        # Else, delegate to Identity Registry
        else:
            # Extract subject from certificate
            # Force conversion to str since it comes across the wire as unicode
            x509 = X509.load_cert_string(str(certificate), format=1)

            subject = str(x509.get_subject())
            is_existing_user = False
            try:
                userinfo = self.clients.identity_registry.find_user_by_subject(
                    subject)
                log.debug("User found")
                is_existing_user = True
            # TODO figure out right exception to catch
            except NotFound as ex:
                # TODO for now just going to use CN portion of subject
                # as new user name
                subj_name = subject.split("/CN=")[-1]
                try:
                    userinfo = self.clients.identity_registry.create_user(
                        subj_name)
                    log.debug("User create succeeded")
                except Exception as ex:
                    # TODO throw exception
                    log.error("Create failed")

            ret = {"ooi_id": userinfo._id}
            ret["user_already_registered"] = is_existing_user
            ret["user_is_admin"] = userinfo.roles.find("ADMIN") != -1
            ret["user_is_early_adopter"] = userinfo.roles.find(
                "EARLY_ADOPTER") != -1
            ret["user_is_data_provider"] = userinfo.roles.find(
                "DATA_PROVIDER") != -1
            ret["user_is_marine_operator"] = userinfo.roles.find(
                "MARINE_OPERATOR") != -1

            return ret
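The fallback user name is just the CN portion of the X.509 subject line, pulled out with a string split. In isolation, with a made-up subject:

subject = "/C=US/O=Example/CN=Jane Doe"
subj_name = subject.split("/CN=")[-1]
print(subj_name)  # Jane Doe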
Example #51
    def _child_failed(self, gproc):
        # extract any PyonThreadTracebacks - one should be last
        extra = ""
        if len(gproc.exception.args) and isinstance(gproc.exception.args[-1], PyonThreadTraceback):
            extra = "\n" + str(gproc.exception.args[-1])

        log.error("Child failed with an exception: (%s) %s%s", gproc, gproc.exception, extra)
        if self._failure_notify_callback:
            self._failure_notify_callback(gproc)
Example #52
    def _priviledged_transport_closed(self, transport, code, text):
        """
        Callback for when the privileged transport is closed.

        If it's an error close, this is bad and will fail fast the container.
        """
        if not (code == 0 or code == 200):
            log.error("The priviledged transport has failed (%s: %s)", code, text)
            self.container.fail_fast("ExManager priviledged transport has failed (%s: %s)" % (code, text), True)
Example #53
def encode_ion(obj):
    """
    msgpack object hook to encode granule/numpy types and IonObjects.
    This hook works also for non-basic types nested within other types, e.g.
    it will be called for a top level IonObject and for any potential nested IonObjects.
    """

    if isinstance(obj, IonObjectBase):
        # There must be a type_ in here so that the object can be decoded
        if not isinstance(obj, IonMessageObjectBase) and not hasattr(
                obj, "type_"):
            log.error("IonObject with no type_: %s", obj)
        return obj.__dict__

    if isinstance(obj, list):
        return {'t': EncodeTypes.LIST, 'o': tuple(obj)}

    if isinstance(obj, set):
        return {'t': EncodeTypes.SET, 'o': tuple(obj)}

    if has_numpy and isinstance(obj, np.ndarray):
        #return {'t': EncodeTypes.NPARRAY, 'o': obj.tolist(), 'd': obj.dtype.str}
        return {
            't': EncodeTypes.NPARRAY,
            'o': obj.tostring(),
            'd': repr(obj.dtype)[6:-1],
            's': obj.shape
        }

    if isinstance(obj, complex):
        return {'t': EncodeTypes.COMPLEX, 'o': (obj.real, obj.imag)}

    if has_numpy and isinstance(obj, np.number):
        if isinstance(obj, numpy_floats):
            return {
                't': EncodeTypes.NPVAL,
                'o': float(obj.astype(float)),
                'd': obj.dtype.str
            }
        elif isinstance(obj, numpy_ints):
            return {
                't': EncodeTypes.NPVAL,
                'o': int(obj.astype(int)),
                'd': obj.dtype.str
            }
        else:
            raise TypeError('Unsupported type "%s"' % type(obj))

    if isinstance(obj, slice):
        return {'t': EncodeTypes.SLICE, 'o': (obj.start, obj.stop, obj.step)}

    if has_numpy and isinstance(obj, np.dtype):
        return {'t': EncodeTypes.DTYPE, 'o': obj.str}

    # Must raise type error for any unknown object
    raise TypeError('Unknown type "%s" in user specified encoder: "%s"' %
                    (type(obj), obj))
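The ndarray branch stores raw bytes plus dtype and shape, which is enough to rebuild the array on decode. A simplified round-trip sketch (using str(arr.dtype) and the modern tobytes/frombuffer names rather than the repr slicing and tostring above):

import numpy as np

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
encoded = {'o': arr.tobytes(), 'd': str(arr.dtype), 's': arr.shape}
restored = np.frombuffer(encoded['o'], dtype=encoded['d']).reshape(encoded['s'])
print(np.array_equal(arr, restored))  # True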
Example #54
 def fail_fast(self, err_msg=""):
     """
     Container needs to shut down and NOW.
     """
     log.error("Fail Fast: %s", err_msg)
     self.stop()
     log.error("Fail Fast: killing container")
     # The exit code of the terminated process is set to non-zero
     os.kill(os.getpid(), signal.SIGTERM)
Example #55
 def register_dap_dataset(self, dataset_id, data_product_name=''):
     coverage_path = DatasetManagementService._get_coverage_path(dataset_id)
     try:
         self.add_dataset_to_xml(coverage_path=coverage_path,
                                 product_name=data_product_name)
         self.create_symlink(coverage_path, self.pydap_data_path)
     except:  # We don't re-raise to prevent clients from bombing out...
         log.exception('Problem registering dataset')
         log.error('Failed to register dataset for coverage path %s' %
                   coverage_path)
Example #56
    def on_quit(self):
        self._ack_all()
        self.subscriber.close()
        if self.subscriber._chan._amq_chan is not None:
            log.error('Channel is still attached, forcing closure.')
            self.subscriber._chan.close_impl()

        self.done.set()
        self.greenlet.join(5)
        self.greenlet = None
Example #57
    def get_actor_header(self, actor_id):

        actor_header = self.build_actor_header(DEFAULT_ACTOR_ID, {})

        if actor_id:
            try:
                header_roles = self.find_roles_by_actor(actor_id)
                actor_header = self.build_actor_header(actor_id, header_roles)
            except Exception as e:
                log.error(e)