def unassign_device_from_site(self, device_id='', site_id=''):
        """Disconnects a device (any type) from a site (any subtype)

        @param device_id    str
        @param site_id    str
        @throws NotFound    object with specified id does not exist
        """
        site_obj = self.subsite.read_one(site_id)
        site_type = site_obj._get_type()

        if RT.PlatformSite == site_type:
            self.platform_site.unlink_device(site_id, device_id)
        elif RT.InstrumentSite == site_type:
            self.instrument_site.unlink_device(site_id, device_id)
        else:
            raise BadRequest("Tried to unassign a device from a %s resource" %
                             site_type)
    def read_conversation_role(self, conversation_role_id=''):
        """Returns an existing Conversation Role resource.

        @param conversation_role_id    str
        @retval conversation_role    ConversationRole
        @throws NotFound    object with specified id does not exist
        """
        if not conversation_role_id:
            raise BadRequest("The conversation_role_id parameter is missing")

        conversation_role = self.clients.resource_registry.read(
            conversation_role_id)
        if not conversation_role:
            raise NotFound("Conversation Role '%s' does not exist" %
                           conversation_role_id)

        return conversation_role
    def __validate_token_string(self, token_string, user_info):
        # Find the token in the UserInfo
        token_obj = [
            token for token in user_info.tokens
            if token.token_string == token_string
        ]
        if not token_obj or not token_obj[0].merge_email or not token_obj[0].expires:
            raise NotFound("__validate_token: Token data not found")
        token_obj = token_obj[0]
        # Validate the expiration time and token status
        current_time = calendar.timegm(datetime.utcnow().timetuple())
        if current_time > token_obj.expires or token_obj.status != "OPEN":
            raise BadRequest(
                "__validate_token: access token expired or token status is invalid")
        return token_obj
    def query_geo_bbox(self, source_id='', field='', top_left=None, bottom_right=None, order=None, limit=0, offset=0, id_only=False):
        validate_true(isinstance(top_left, (list,tuple)), 'Top Left is not a list or a tuple')
        validate_true(len(top_left)==2, 'Top Left is not of the right size: (2)')
        validate_true(isinstance(bottom_right, (list,tuple)), 'Bottom Right is not a list or a tuple')
        validate_true(len(bottom_right)==2, 'Bottom Right is not of the right size: (2)')

        if not self.use_es:
            raise BadRequest('Cannot make queries without ElasticSearch; enable it in res/config/pyon.yml')

        es = ep.ElasticSearch(host=self.elasticsearch_host, port=self.elasticsearch_port)
        source = self.clients.resource_registry.read(source_id)

        iterate = self._multi(self.query_geo_bbox, source=source, field=field, top_left=top_left, bottom_right=bottom_right, order=order, limit=limit, offset=offset, id_only=id_only)
        if iterate is not None:
            return iterate

        index = source
        validate_is_instance(index, ElasticSearchIndex, '%s does not refer to a valid index.' % source_id)

        sorts = ep.ElasticSort()
        if order is not None and isinstance(order,dict):
            sort_field = order.keys()[0]
            value = order[sort_field]
            sorts.sort(sort_field,value)
            es.sorted(sorts)

        if limit:
            es.size(limit)

        if offset:
            es.from_offset(offset)

        if field == '*':
            field = '_all'

        filter = ep.ElasticFilter.geo_bounding_box(field, top_left, bottom_right)
        es.filtered(filter)

        query = ep.ElasticQuery.match_all()

        response = IndexManagementService._es_call(es.search_index_advanced,index.index_name,query)
        IndexManagementService._check_response(response)

        return self._results_from_response(response,id_only)
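
# A usage sketch for query_geo_bbox, assuming a Discovery service client
# ('discovery') and the resource id of an ElasticSearchIndex ('index_id');
# both names are hypothetical. top_left and bottom_right are two-element
# corner pairs whose coordinate order is whatever
# ep.ElasticFilter.geo_bounding_box expects:
#
#     results = discovery.query_geo_bbox(source_id=index_id,
#                                        field='location',        # hypothetical geo field
#                                        top_left=[-75.0, 45.0],
#                                        bottom_right=[-65.0, 35.0],
#                                        order={'name': 'asc'},
#                                        limit=20,
#                                        id_only=True)
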
    def update_policy(self, policy=None):
        """Updates the provided Policy object.  Throws NotFound exception if
        an existing version of Policy is not found.  Throws Conflict if
        the provided Policy object is not based on the latest persisted
        version of the object.

        @param policy    Policy
        @throws NotFound    object with specified id does not exist
        @throws BadRequest    if object does not have _id or _rev attribute
        @throws Conflict    object not based on latest persisted object version
        """
        if not is_basic_identifier(policy.name):
            raise BadRequest(
                "The policy name '%s' can only contain alphanumeric and underscore characters"
                % policy.name)

        self.clients.resource_registry.update(policy)
    def query_range(self, source_id='', field='', from_value=None, to_value=None, order=None, limit=0, offset=0, id_only=False):
        if not self.use_es:
            raise BadRequest('Cannot make queries without ElasticSearch; enable it in res/config/pyon.yml')

        validate_true(from_value is not None, 'from_value not specified')
        validate_true(isinstance(from_value, (int, float)), 'from_value is not a valid number')
        validate_true(to_value is not None, 'to_value not specified')
        validate_true(isinstance(to_value, (int, float)), 'to_value is not a valid number')
        validate_true(source_id, 'source_id not specified')

        es = ep.ElasticSearch(host=self.elasticsearch_host, port=self.elasticsearch_port)

        source = self.clients.resource_registry.read(source_id)

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
        # If source is a view, catalog or collection go through it and recursively call query_range on all the results in the indexes
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
        iterate = self._multi(self.query_range, source, field=field, from_value=from_value, to_value=to_value, order=order, limit=limit, offset=offset, id_only=id_only)
        if iterate is not None:
            return iterate

        index = source
        validate_is_instance(index,ElasticSearchIndex,'%s does not refer to a valid index.' % source_id)
        if order:
            validate_is_instance(order,dict,'Order is incorrect.')
            es.sort(**order)

        if limit:
            es.size(limit)

        if offset:
            es.from_offset(offset)

        if field == '*':
            field = '_all'

        query = ep.ElasticQuery().range(
            field      = field,
            from_value = from_value,
            to_value   = to_value
        )

        response = IndexManagementService._es_call(es.search_index_advanced,index.index_name,query)

        IndexManagementService._check_response(response)

        return self._results_from_response(response, id_only)
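
# A usage sketch for query_range, again with a hypothetical Discovery client
# and index id. This would find documents whose 'temperature' value lies in
# [0, 25], sorted ascending by that field:
#
#     results = discovery.query_range(source_id=index_id,
#                                     field='temperature',  # hypothetical numeric field
#                                     from_value=0,
#                                     to_value=25.0,
#                                     order={'temperature': 'asc'},
#                                     limit=50,
#                                     id_only=False)
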
    def query_term(self, source_id='', field='', value='', order=None, limit=0, offset=0, id_only=False):
        '''
        ElasticSearch query against an index
        > discovery.query_term('indexID', 'name', '*', order={'name':'asc'}, limit=20, id_only=False)
        '''
        if not self.use_es:
            raise BadRequest('Cannot make queries without ElasticSearch; enable system.elasticsearch to make queries.')

        validate_true(source_id, 'Unspecified source_id')
        validate_true(field, 'Unspecified field')
        validate_true(value, 'Unspecified value')

        es = ep.ElasticSearch(host=self.elasticsearch_host, port=self.elasticsearch_port)

        source = self.clients.resource_registry.read(source_id)

        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
        # If source is a view, catalog or collection go through it and recursively call query_term on all the results in the indexes
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
        iterate = self._multi(self.query_term, source, field=field, value=value, order=order, limit=limit, offset=offset, id_only=id_only)
        if iterate is not None:
            return iterate

        index = source
        validate_is_instance(index, ElasticSearchIndex, '%s does not refer to a valid index.' % source_id)
        if order:
            validate_is_instance(order, dict, 'Order is incorrect.')
            es.sort(**order)

        if limit:
            es.size(limit)

        if offset:
            es.from_offset(offset)

        if field == '*':
            field = '_all'

        query = ep.ElasticQuery().wildcard(field=field, value=value)
        response = IndexManagementService._es_call(es.search_index_advanced,index.index_name,query)

        IndexManagementService._check_response(response)

        return self._results_from_response(response, id_only)
        def command_loop():
            while True:
                try:
                    cmd = self._queue.pop(0)

                except IndexError:
                    # No command available, sleep for a while.
                    gevent.sleep(.1)
                    continue

                if self._id == 'fake_id':
                    log.debug('Processing fake command.')
                    worktime = cmd.kwargs.get('worktime', None)
                    if worktime:
                        worktime = random.uniform(0, worktime)
                        gevent.sleep(worktime)
                    payload = cmd.kwargs.get('payload', None)
                    result = payload or 'fake_result'
                else:
                    cmdstr = cmd.command
                    args = cmd.args
                    kwargs = cmd.kwargs

                    try:
                        log.debug('Remote endpoint attempting command: %s',
                                  cmdstr)
                        func = getattr(self._client, cmdstr)
                        result = func(*args, **kwargs)
                        log.debug('Remote endpoint command %s got result %s',
                                  cmdstr, str(result))

                    except (AttributeError, TypeError):
                        # The command does not exist.
                        errstr = 'Unable to call remote command %s.' % cmdstr
                        log.error(errstr)
                        result = BadRequest(errstr)

                    except IonException as ex:
                        # populate result with error.
                        log.error(str(ex))
                        result = ex

                    except Exception as ex:
                        # populate result with error.
                        log.error(str(ex))
                        result = ServerError(str(ex))
    def deploy_device_to_site(self, device_id='', site_id=''):
        """
        link a device to a site as the primary instrument
        """
        # Check that the Site does not already have an associated primary device
        prim_device_ids, _ = self.clients.resource_registry.find_objects(
            site_id, PRED.hasDevice, RT.InstrumentDevice, True)
        if len(prim_device_ids) != 0:
            raise BadRequest(
                "Site %s already has a primary device associated with id %s" %
                (str(site_id), str(prim_device_ids[0])))
        else:
            # Create the links
            self.clients.resource_registry.create_association(
                site_id, PRED.hasDevice, device_id)

        return
    def find_res_by_lcstate(self, lcstate, restype=None, id_only=False):
        log.debug("find_res_by_lcstate(lcstate=%s, restype=%s)", lcstate,
                  restype)
        if type(id_only) is not bool:
            raise BadRequest('id_only must be type bool, not %s' %
                             type(id_only))
        ds, datastore_name = self._get_datastore()
        view = ds.view(self._get_viewname("resource", "by_lcstate"),
                       include_docs=(not id_only))
        is_hierarchical = (lcstate in CommonResourceLifeCycleSM.STATE_ALIASES)
        # lcstate is a hierarchical state and we need to treat the view differently
        if is_hierarchical:
            key = [1, lcstate]
        else:
            key = [0, lcstate]
        if restype:
            key.append(restype)
        endkey = list(key)
        endkey.append(END_MARKER)
        rows = view[key:endkey]

        if is_hierarchical:
            res_assocs = [
                dict(lcstate=row['key'][3],
                     type=row['key'][2],
                     name=row['key'][4],
                     id=row.id) for row in rows
            ]
        else:
            res_assocs = [
                dict(lcstate=row['key'][1],
                     type=row['key'][2],
                     name=row['key'][3],
                     id=row.id) for row in rows
            ]

        log.debug("find_res_by_lcstate() found %s objects", len(res_assocs))
        if id_only:
            res_ids = [row.id for row in rows]
            return (res_ids, res_assocs)
        else:
            res_docs = [
                self._persistence_dict_to_ion_object(row.doc) for row in rows
            ]
            return (res_docs, res_assocs)
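
# To make the key/endkey construction above concrete: the view emits composite
# keys like [is_hierarchical_flag, lcstate, restype, ...], and appending a
# high-sorting sentinel to a copy of the start key turns the view slice into a
# prefix range. A standalone sketch; the sentinel value here is an assumption
# standing in for the real END_MARKER defined in the datastore code:

END_MARKER = u'\ufff0'                 # assumed high-sorting sentinel

key = [0, 'DEPLOYED']                  # non-hierarchical lcstate
key.append('InstrumentDevice')         # optional restype narrows the range
endkey = list(key)
endkey.append(END_MARKER)
# view[key:endkey] then returns every row whose composite key begins with
# [0, 'DEPLOYED', 'InstrumentDevice'].
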
    def terminate_process(self, process_id):
        service_instance = self.procs.get(process_id, None)
        if not service_instance:
            raise BadRequest(
                "Cannot terminate. Process id='%s' unknown on container id='%s'"
                % (process_id, self.container.id))

        service_instance.quit()

        # terminate IonProcessThread (may not have one, i.e. simple process)
        if service_instance._process:
            service_instance._process.notify_stop()
            service_instance._process.stop()

        del self.procs[process_id]

        self.container.directory.unregister_safe(
            "/Containers/%s/Processes" % self.container.id,
            service_instance.id)

        # Cleanup for specific process types
        if service_instance._proc_type == "service":
            listen_name = get_safe(service_instance.CFG, "process.listen_name",
                                   service_instance.name)
            self.container.directory.unregister_safe(
                "/Services/%s" % listen_name, service_instance.id)
            remaining_workers = self.container.directory.find_entries(
                "/Services/%s" % listen_name)
            if remaining_workers and len(remaining_workers) == 2:
                self.container.directory.unregister_safe(
                    "/Services", listen_name)

        elif service_instance._proc_type == "agent":
            self.container.directory.unregister_safe("/Agents",
                                                     service_instance.id)

        self.container.event_pub.publish_event(
            event_type="ProcessLifecycleEvent",
            origin=service_instance.id,
            origin_type="ContainerProcess",
            sub_type="TERMINATE",
            container_id=self.container.id,
            process_type=service_instance._proc_type,
            process_name=service_instance._proc_name,
            state=ProcessStateEnum.TERMINATE)
    def complete_account_merge(self, token_string=""):
        '''
        Completes merging the two accounts after verifying the token string
        @throws BadRequest  A parameter is missing
        @throws NotFound  Merge data not found
        '''
        log.debug("complete_account_merge with token string: %s" % token_string)
        if not token_string:
            raise BadRequest("complete_account_merge: token_str must be set")

        # Get current UserInfo
        current_user_id = self.__get_current_user_id()
        current_user_info = self.find_user_info_by_id(current_user_id)

        # Find all the necessary data of the merge account
        token_obj = self.__validate_token_string(token_string, current_user_info)
        merge_user_info_obj = self.find_user_info_by_email(token_obj.merge_email)  # Find UserInfo of the merge account
        merge_user_info_id = merge_user_info_obj._id
        subjects, associations = self.clients.resource_registry.find_subjects(RT.ActorIdentity, PRED.hasInfo, merge_user_info_id)  # Find ActorIdentity of the merge account
        if not associations:
            raise NotFound("complete_account_merge: ActorIdentity and UserInfo association does not exist. UserInfo ID: %s" % merge_user_info_id)
        merge_actor_identity_id = subjects[0]._id

        # Find UserCredentials of the merge account
        merge_user_credential_id, matches = self.clients.resource_registry.find_objects(merge_actor_identity_id, PRED.hasCredentials, RT.UserCredentials, id_only=True)
        if not merge_user_credential_id:
            raise NotFound("complete_account_merge: UserCredentials and ActorIdentity association does not exist. ActorIdentity ID: %s" % merge_actor_identity_id)
        merge_user_credential_id = merge_user_credential_id[0]

        # Remove association between ActorIdentity and UserCredentials
        log.debug("complete_account_merge: merge account data: merge_user_info_id: %s merge_user_credential_id:%s merge_actor_identity_id:%s" % (merge_user_info_id, merge_user_credential_id, merge_actor_identity_id))
        self.delete_user_credential_association(merge_user_credential_id, merge_actor_identity_id)

        # Remove the merge account ActorIdentity and merge UserInfo.
        self.delete_user_info(merge_user_info_id, merge_actor_identity_id)
        self.delete_actor_identity(merge_actor_identity_id)

        # Create association between the current user ActorIdentity and the merge user UserCredentials
        self.clients.resource_registry.create_association(current_user_id, PRED.hasCredentials, merge_user_credential_id)

        # Update the token status
        token_obj.status = "VERIFIED"
        self.update_user_info(current_user_info)
        log.debug("complete_account_merge: account merge completed from %s to %s" %(merge_actor_identity_id, current_user_id))
        return True
    def _get_expire_time(self, task):
        now = self._now()
        now_posix = self._convert_to_posix_time(now)
        expires_in = []
        if type(task) == TimeOfDayTimer:
            for time_of_day in task.times_of_day:
                expire_time_input = datetime(year=now.year,
                                             month=now.month,
                                             day=now.day,
                                             hour=time_of_day['hour'],
                                             minute=time_of_day['minute'],
                                             second=time_of_day['second'])
                # Find out whether the time of day has already passed for today; if it has, schedule it for tomorrow
                if expire_time_input.time() > now.time():
                    expire_time = expire_time_input
                else:
                    tomorrow = now + timedelta(days=1)
                    tomorrow_posix = self._convert_to_posix_time(tomorrow)
                    if task.expires < tomorrow_posix:
                        # This occurs when, e.g., the current time is 8:00AM and the user sets up a timer
                        # for 6:00AM: the timer must be scheduled for 6:00AM tomorrow, but the task's
                        # expiration time falls at 5:00AM tomorrow, before the timer would ever fire.
                        raise BadRequest(
                            "SchedulerService: couldn't setup a timer. Please check the timer and the expiration time"
                        )
                    expire_time = datetime(year=tomorrow.year,
                                           month=tomorrow.month,
                                           day=tomorrow.day,
                                           hour=time_of_day['hour'],
                                           minute=time_of_day['minute'],
                                           second=time_of_day['second'])

                expires_in.append(ceil((expire_time - now).total_seconds()))
        elif type(task) == IntervalTimer and (task.end_time == -1 or (
            (now_posix + task.interval) <= task.end_time)):
            expires_in = [(self._calculate_next_interval(task, now_posix))]
        return expires_in
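
# A standalone worked example of the time-of-day arithmetic above: if it is now
# 08:00 and the timer is set for 06:00, the target has already passed today, so
# the next firing is 06:00 tomorrow, 22 hours away. Only datetime/timedelta are
# needed to check the math:

from datetime import datetime, timedelta
from math import ceil

now = datetime(2013, 1, 10, 8, 0, 0)
target = now.replace(hour=6, minute=0, second=0)
if target.time() > now.time():
    expire_time = target                       # still ahead of us today
else:
    tomorrow = now + timedelta(days=1)
    expire_time = tomorrow.replace(hour=6, minute=0, second=0)
expires_in = ceil((expire_time - now).total_seconds())
assert expires_in == 22 * 3600
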
    def add_post_interpolated(self, data_product_id, parameter_name):
        '''
        Adds the post-recovery calibration parameter

        E.g.
        add_post_interpolated(ctd_parsed_data_product_id, 'seawater_temperature')

        Creates a new parameter
        'tempwat_l1b_interp'
        '''

        dps_code, parameter = self._get_base_parameter(data_product_id,
                                                       parameter_name)
        level = self._level(parameter)
        if not level:
            raise BadRequest("Parameter lacks a DPS level")
        pf_id = self.find_interpolate()
        if not pf_id:
            raise NotFound(
                "secondary interpolation function not defined as a parameter function"
            )
        prefix = dps_code.lower() + level
        parameters = {
            prefix + 'b_interp': {
                "description": '%s Secondary Calibration Interpolated' % parameter.name,
                "display_name": '%s Secondary Calibration Interpolated' % parameter.display_name,
                "units": '1',
                "parameter_type": 'function',
                "parameter_function_id": pf_id,
                "parameter_function_map": {
                    'x': 'time',
                    'range0': dps_code.lower() + '_l1b_pd',
                    'range1': dps_code.lower() + '_l1b_pr',
                    'starts': dps_code.lower() + '_l1b_start',
                    'ends': dps_code.lower() + '_l1b_end'
                }
            }
        }
        self.initialize_parameters(parameters, data_product_id)
    def _link_resources_single_subject(self,
                                       subject_id='',
                                       association_type='',
                                       object_id='',
                                       raise_exn=True):
        """
        Create an association of which only one subject at a time may exist.
        If an association already exists, the caller chooses (via raise_exn) whether to raise
        an exception or to quietly remove/replace the existing one.

        @param subject_id the resource ID of the predefined type
        @param association_type the predicate
        @param object_id the resource ID of the type to be joined
        @param raise_exn whether a BadRequest error should be raised if a duplicate is attempted
        @todo check for errors: does RR check for bogus ids?
        """

        # see if there are any other objects of this type and pred on this subject
        obj_type = self._get_resource_type_by_id(object_id)
        existing_links = self._find_having(association_type, object_id)

        if len(existing_links) > 1:
            raise Inconsistent(
                "Multiple %s-%s subjects found on the same %s object with id='%s'"
                % (self.iontype, association_type, obj_type, object_id))

        if len(existing_links) > 0:
            if self._resource_link_exists(subject_id, association_type,
                                          object_id):
                log.debug(
                    "Create %s Association (single subject) from '%s': ALREADY EXISTS" %
                    (self._assn_name(association_type), self._toplevel_call()))
                return None

            if raise_exn:
                raise BadRequest(
                    "Attempted to add a duplicate %s-%s association on a %s object with id='%s'"
                    % (self.iontype, association_type, obj_type, subject_id))

            self._unlink_resources(subject_id, association_type,
                                   existing_links[0])

        return self._link_resources_lowlevel(subject_id, association_type,
                                             object_id, False)
    def read_workflow_definition(self, workflow_definition_id=''):
        """Returns an existing Workflow Definition resource.

        @param workflow_definition_id    str
        @retval workflow_definition    WorkflowDefinition
        @throws BadRequest    if any of the required parameters are not set
        @throws NotFound    object with specified id does not exist
        """
        if not workflow_definition_id:
            raise BadRequest("The workflow_definition_id parameter is missing")

        workflow_definition = self.clients.resource_registry.read(
            workflow_definition_id)
        if not workflow_definition:
            raise NotFound("workflow_definition_id %s does not exist" %
                           workflow_definition_id)

        return workflow_definition
    def find_resource_policies(self, resource_id=''):
        """Finds all policies associated with a specific resource

        @param resource_id    str
        @retval policy_list    list
        @throws NotFound    object with specified id does not exist
        """
        if not resource_id:
            raise BadRequest("The resource_id parameter is missing")

        resource = self.clients.resource_registry.read(resource_id)
        if not resource:
            raise NotFound("Resource %s does not exist" % resource_id)

        policy_list, _ = self.clients.resource_registry.find_objects(
            resource, PRED.hasPolicy, RT.Policy)

        return policy_list
    def register_mult(self, entries):
        """
        Registers multiple directory entries efficiently in one datastore access.
        Note: this fails if any of the entries already exist, so it works for create only.
        """
        if type(entries) not in (list, tuple):
            raise BadRequest("Bad entries type")
        de_list = []
        cur_time = get_ion_ts()
        for parent, key, attrs in entries:
            de = self._create_dir_entry(object_id=create_unique_directory_id(),
                                        parent=parent,
                                        key=key,
                                        attributes=attrs,
                                        ts_created=cur_time,
                                        ts_updated=cur_time)
            de_list.append(de)
        self.datastore.create_doc_mult(de_list)
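
# A usage sketch for register_mult: each entry is a (parent, key, attributes)
# tuple. The directory client and the paths shown are hypothetical:
#
#     directory.register_mult([
#         ("/Services", "worker_1", {"queue": "svc_q1"}),
#         ("/Services", "worker_2", {"queue": "svc_q2"}),
#     ])
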
    def _fetch(self, n):
        '''
        Fetches n messages from the queue.
        The messages must be acknowledged before another fetch can take place.
        '''
        self._fsm.process(self.I_FETCH)
        try:
            if len(self._msg_buffer) + n >= self.MESSAGE_BUFFER:
                raise BadRequest('Request exceeds maximum buffer space')
            try:
                self._msg_buffer.extend(
                    self.subscriber.get_n_msgs(n, self.TIMEOUT))
            except gevent.Timeout:
                raise Timeout
        except:
            self._fsm.process(self.I_EXCEPTION)
            return []
        return [(msg.body, msg.headers) for msg in self._msg_buffer]
    def delete_workflow_definition(self, workflow_definition_id=''):
        """Deletes an existing Workflow Definition resource.

        @param workflow_definition_id    str
        @throws BadRequest    if any of the required parameters are not set
        @throws NotFound    object with specified id does not exist
        """
        if not workflow_definition_id:
            raise BadRequest("The workflow_definition_id parameter is missing")

        workflow_definition = self.clients.resource_registry.read(workflow_definition_id)
        if not workflow_definition:
            raise NotFound("workflow_definition_id %s does not exist" % workflow_definition_id)


        self._delete_workflow_associations(workflow_definition_id)

        self.clients.resource_registry.delete(workflow_definition_id)
    def get_data_process_active_subscriptions_count(self, data_process_id=""):
        if not data_process_id:
            raise BadRequest(
                "The data_process_id parameter is empty")

        subscription_ids, _ = self.clients.resource_registry.find_objects(
            subject=data_process_id,
            predicate=PRED.hasSubscription,
            id_only=True)
        active_count = 0
        for subscription_id in subscription_ids:
            if self.clients.pubsub_management.subscription_is_active(
                    subscription_id):
                active_count += 1
        log.debug(
            "get_data_process_active_subscriptions_count(id=%s): %s subscriptions",
            data_process_id, active_count)
        return active_count
    def read_doc_mult(self, object_ids, datastore_name=""):
        if not datastore_name:
            datastore_name = self.datastore_name
        try:
            datastore_dict = self.root[datastore_name]
        except KeyError:
            raise BadRequest('Data store ' + datastore_name + ' does not exist.')

        doc_list = []
        try:
            for object_id in object_ids:
                log.debug('Reading head version of object %s/%s' % (datastore_name, str(object_id)))
                doc = datastore_dict[object_id]

                doc_list.append(doc.copy())
        except KeyError:
            raise NotFound('Object with id %s does not exist.' % str(object_id))
        return doc_list
    def _manage_routes(self, routes):
        retval = {}
        for in_data_product_id, route in routes.iteritems():
            for out_data_product_id, actor in route.iteritems():
                in_stream_id = self._get_stream_from_dp(in_data_product_id)
                out_stream_id = self._get_stream_from_dp(out_data_product_id)
                if actor:
                    actor = self.clients.resource_registry.read(actor)
                    if isinstance(actor, TransformFunction):
                        actor = {'module': actor.module, 'class': actor.cls}
                    else:
                        raise BadRequest(
                            'This actor type is not currently supported')

                if in_stream_id not in retval:
                    retval[in_stream_id] = {}
                retval[in_stream_id][out_stream_id] = actor
        return retval
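
# The routes argument to _manage_routes is a nested mapping of
# {in_data_product_id: {out_data_product_id: transform_function_resource_id}},
# where a falsy actor means no transform. A hypothetical shape:

routes = {
    'dp_in_1': {
        'dp_out_1': 'transform_func_id_1',  # read and resolved to {'module': ..., 'class': ...}
        'dp_out_2': None,                   # pass-through, no actor
    },
}
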
    def _read_by_path(self, path, orgname=None):
        """
        Given a qualified path, find entry in directory and return DirEntry
        document or None if not found
        """
        if path is None:
            raise BadRequest("Illegal arguments")
        orgname = orgname or self.orgname
        parent, key = path.rsplit("/", 1)
        parent = parent or "/"
        find_key = [orgname, key, parent]
        view_res = self.datastore.find_docs_by_view('directory', 'by_key', key=find_key, id_only=True)

        if len(view_res) > 1:
            raise Inconsistent("More than one directory entry found for key %s" % path)
        elif view_res:
            return view_res[0][2]  # First value
        return None
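
# To illustrate the path handling in _read_by_path: a qualified path is split
# into its parent and terminal key, and the view is keyed on
# [orgname, key, parent]. A standalone sketch (orgname 'ION' is hypothetical):

path = "/Org/Services/discovery"
parent, key = path.rsplit("/", 1)
parent = parent or "/"
find_key = ["ION", key, parent]
assert find_key == ["ION", "discovery", "/Org/Services"]
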
    def prepare(self, will_launch=True):
        """
        Prepare (validate) an agent for launch, fetching all associated resources

        @param will_launch - whether the running status should be checked -- set false if just generating config
        """
        assert self.agent_instance_obj

        if will_launch:
            # if there is an agent pid then assume that a driver is already started
            if self.agent_instance_obj.agent_process_id:
                raise BadRequest("Agent Instance already running for this device pid: %s" %
                                 str(self.agent_instance_obj.agent_process_id))

        # validate the associations, then pick things up
        self._collect_agent_instance_associations()
        self.will_launch = will_launch
        return self.generate_config()
def process_gateway_request(resource_id, operation, json_request, requester):

    if requester is not None:
        # inject the requester into the request body before it is serialized
        json_request["agentRequest"]["requester"] = requester

    payload = simplejson.dumps(json_request)

    response = agent_gateway_request(resource_id + '/' + operation, payload)

    if response['data'].has_key(GATEWAY_ERROR):
        log.error(response['data'][GATEWAY_ERROR][GATEWAY_ERROR_MESSAGE])
        raise BadRequest(
            response['data'][GATEWAY_ERROR][GATEWAY_ERROR_MESSAGE])

    if "type_" in response['data'][GATEWAY_RESPONSE]:
        del response['data'][GATEWAY_RESPONSE]["type_"]

    return response['data'][GATEWAY_RESPONSE]
    def publish_event(self, origin=None, event_type=None, **kwargs):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation fails with an exception if the event cannot be published.
        @param origin     the origin field value
        @param event_type the event type (defaults to the EventPublisher's event_type if set)
        @param kwargs     additional event fields
        @retval event_object    the event object which was published
        """

        event_type = event_type or self.event_type
        if not event_type:
            raise BadRequest("No event_type provided")

        event_object = bootstrap.IonObject(event_type, origin=origin, **kwargs)
        ret_val = self.publish_event_object(event_object)
        return ret_val
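
# A usage sketch for publish_event, assuming a publisher constructed with a
# default event_type; the names and field values are hypothetical:
#
#     pub = EventPublisher(event_type="ResourceModifiedEvent")
#     event = pub.publish_event(origin=resource_id,
#                               sub_type="UPDATE",
#                               description="attributes changed")
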
    def _get_datastore(self, datastore_name=None):
        """
        Returns the couch datastore instance and datastore name.
        The datastore instance is cached to avoid an explicit lookup, saving an HTTP request.
        The consequence is that if another process deletes the datastore in the meantime, we will fail later.
        """
        datastore_name = self._get_datastore_name(datastore_name)

        if datastore_name in self._datastore_cache:
            return self._datastore_cache[datastore_name], datastore_name

        try:
            ds = self.server[datastore_name]  # Note: causes http lookup
            self._datastore_cache[datastore_name] = ds
            return ds, datastore_name
        except ResourceNotFound:
            raise NotFound("Data store '%s' does not exist" % datastore_name)
        except ValueError:
            raise BadRequest("Data store name '%s' invalid" % datastore_name)
    def register_mult(self, entries):
        """
        Registers multiple directory entries efficiently in one datastore access.
        Note: this fails if any of the entries already exist, so it works for create only.
        """
        if type(entries) not in (list, tuple):
            raise BadRequest("Bad entries type")
        de_list = []
        cur_time = get_ion_ts()
        for parent, key, attrs in entries:
            direntry = self._create_dir_entry(parent,
                                              key,
                                              attributes=attrs,
                                              ts=cur_time)
            de_list.append(direntry)
        deid_list = [
            create_unique_directory_id() for i in xrange(len(de_list))
        ]
        self.dir_store.create_mult(de_list, deid_list)
    def create_catalog(self, catalog_name='', keywords=None):
        """A catalog is a new data set that aggregates and presents datasets in a specific way.
        @param catalog_name    str
        @retval catalog_id    str
        """
        # Ensure unique catalog names 
        res, _ = self.clients.resource_registry.find_resources(name=catalog_name, id_only=True)
        if len(res) > 0:
            raise BadRequest('The catalog resource with name: %s, already exists.' % catalog_name)
        
        if keywords is None:
            keywords = []

        catalog_res = Catalog(name=catalog_name, catalog_fields=keywords)
        index_found = False
        available_fields = set()
        catalog_id, _ = self.clients.resource_registry.create(catalog_res)
        catalog_res = self.read_catalog(catalog_id)

        indexes = self.clients.index_management.list_indexes(id_only=False).values()
        last_resort = []
        for index in indexes:
            index_fields = set(index.options.attribute_match) | set(index.options.range_fields) | set(index.options.geo_fields)
            if set(keywords).issubset(index_fields):
                if len(index_fields) > 100:  # Index is quite large; save for last resort
                    heapq.heappush(last_resort, (len(index_fields), index))
                    continue
                self.clients.resource_registry.create_association(subject=catalog_id, predicate=PRED.hasIndex, object=index._id)
                index_found = True
                available_fields = available_fields | index_fields
        if not index_found and last_resort:
            _, index = heapq.heappop(last_resort)
            self.clients.resource_registry.create_association(subject=catalog_id, predicate=PRED.hasIndex, object=index._id)
            index_found = True

        if not index_found:
            #@todo: notify the client that an empty catalog was formed
            pass

        catalog_res.available_fields = list(available_fields)

        self.update_catalog(catalog_res)
        return catalog_id
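
# A usage sketch for create_catalog, assuming a catalog management client
# ('catalog_management' is hypothetical). The keywords should be a subset of
# some index's searchable fields for an index to be associated:
#
#     catalog_id = catalog_management.create_catalog(
#         catalog_name='instrument_catalog',
#         keywords=['name', 'lcstate'])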