def get(self, **kwargs): """Return JSON data of a crash report, given its uuid. """ filters = [ ('uuid', None, 'str'), ('datatype', None, 'str'), ('name', None, 'str') # only applicable if datatype == 'raw' ] params = external_common.parse_arguments(filters, kwargs) if not params.uuid: raise MissingArgumentError('uuid') if not params.datatype: raise MissingArgumentError('datatype') # get a generic crashstorage instance from whatever external resource # is implementing this service. store = self.get_storage() datatype_method_mapping = { 'raw': 'get_raw_dump', 'meta': 'get_raw_crash', 'processed': 'get_processed', 'unredacted': 'get_unredacted_processed', } get = store.__getattribute__(datatype_method_mapping[params.datatype]) try: if params.datatype == 'raw': return (get(params.uuid, name=params.name), 'application/octet-stream') else: return get(params.uuid) except CrashIDNotFound: if params.datatype in ('processed', 'unredacted'): # try to fetch a raw crash just to ensure that the raw crash # exists. If this line fails, there's no reason to actually # submit the priority job. try: store.get_raw_crash(params.uuid) except CrashIDNotFound: raise ResourceNotFound(params.uuid) # search through the existing other services to find the # Priorityjob service. try: priorityjob_service_impl = self.all_services[ 'Priorityjobs'] except KeyError: raise ServiceUnavailable('Priorityjobs') # get the underlying implementation of the Priorityjob # service and instantiate it. priority_job_service = priorityjob_service_impl.cls( config=self.config) # create the priority job for this crash_ids priority_job_service.create(uuid=params.uuid) raise ResourceUnavailable(params.uuid) raise ResourceNotFound(params.uuid)
def get(self, **kwargs):
    '''Return the result of a custom query.'''
    params = external_common.parse_arguments(self.filters, kwargs)

    if not params.query:
        raise MissingArgumentError('query')

    # Set indices.
    indices = []
    if not params.indices:
        # By default, use the last two indices.
        today = datetimeutil.utc_now()
        last_week = today - datetime.timedelta(days=7)

        indices = self.generate_list_of_indexes(last_week, today)
    elif len(params.indices) == 1 and params.indices[0] == 'ALL':
        # If we want all indices, just do nothing.
        pass
    else:
        indices = params.indices

    search_args = {}
    if indices:
        search_args['index'] = indices
        search_args['doc_type'] = (
            self.config.elasticsearch.elasticsearch_doctype
        )

    connection = self.get_connection()

    try:
        results = connection.search(
            body=json.dumps(params.query),
            **search_args
        )
    except elasticsearch.exceptions.NotFoundError as e:
        missing_index = re.findall(BAD_INDEX_REGEX, e.error)[0]
        raise ResourceNotFound(
            "elasticsearch index '%s' does not exist" % missing_index
        )
    except elasticsearch.exceptions.TransportError as e:
        raise DatabaseError(e)

    return results
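# A minimal sketch of the missing-index extraction in the NotFoundError
# handler above. BAD_INDEX_REGEX is defined elsewhere in the real module,
# so the pattern and the error string below are assumptions modeled on the
# "IndexMissingException[[name] missing]" errors Elasticsearch returns.
import re

BAD_INDEX_REGEX = re.compile(r'\[\[(\w+)\] missing\]')  # assumed pattern

error_message = 'IndexMissingException[[socorro201512] missing]'
missing_index = re.findall(BAD_INDEX_REGEX, error_message)[0]
print(missing_index)  # socorro201512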
def get(self, **kwargs):
    self.context.logger.info('Running %s' % self.__class__.__name__)
    raise ResourceNotFound('not here')
def update_field(self, **kwargs): """Update an existing field in the database. If the field does not exist yet, a ResourceNotFound error is raised. If you want to update only some keys, just do not pass the ones you don't want to change. """ filters = [ ('name', None, 'str'), ('data_validation_type', None, 'str'), ('default_value', None, 'str'), ('description', None, 'str'), ('form_field_choices', None, ['list', 'str']), ('has_full_version', None, 'bool'), ('in_database_name', None, 'str'), ('is_exposed', None, 'bool'), ('is_returned', None, 'bool'), ('is_mandatory', None, 'bool'), ('query_type', None, 'str'), ('namespace', None, 'str'), ('permissions_needed', None, ['list', 'str']), ('storage_mapping', None, 'json'), ] params = external_common.parse_arguments(filters, kwargs) if not params['name']: raise MissingArgumentError('name') # Remove all the parameters that were not explicitely passed. for key in params.keys(): if key not in kwargs: del params[key] es_connection = self.get_connection() es_index = self.config.elasticsearch.elasticsearch_default_index es_doc_type = 'supersearch_fields' # First verify that the field does exist. try: old_value = es_connection.get( index=es_index, doc_type=es_doc_type, id=params['name'], )['_source'] # Only the actual document is of interest. except elasticsearch.exceptions.NotFoundError: # This field does not exist yet, it thus cannot be updated! raise ResourceNotFound( 'The field "%s" does not exist in the database, it needs to ' 'be created before it can be updated. ' % params['name']) # Then, if necessary, verify the new mapping. if (('storage_mapping' in params and params['storage_mapping'] != old_value['storage_mapping']) or ('in_database_name' in params and params['in_database_name'] != old_value['in_database_name'])): # This is a change that will have an impact on the Elasticsearch # mapping, we first need to make sure it doesn't break. new_mapping = self.get_mapping(overwrite_mapping=params) # Try the mapping. If there is an error, an exception will be # raised. If an exception is raised, the new mapping will be # rejected. self.test_mapping(new_mapping) if ('storage_mapping' in params and params['storage_mapping'] != old_value['storage_mapping']): # The storage mapping is an object, and thus is treated # differently than other fields by Elasticsearch. If a user # changes the object by removing a field from it, that field won't # be removed as part of the update (which performs a merge of all # objects in the back-end). We therefore want to perform the merge # ourselves, and remove the field from the database before # re-indexing it. new_doc = old_value.copy() new_doc.update(params) es_connection.delete( index=es_index, doc_type=es_doc_type, id=new_doc['name'], ) es_connection.index( index=es_index, doc_type=es_doc_type, body=new_doc, id=new_doc['name'], op_type='create', refresh=True, ) # If we made a change to the storage_mapping, log that change. self.config.logger.info( 'Elasticsearch mapping changed for field "%s", ' 'was "%s", now "%s"', params['name'], old_value['storage_mapping'], new_doc['storage_mapping'], ) else: # Then update the new field in the database. Note that # Elasticsearch takes care of merging the new document into the # old one, so missing values won't be changed. es_connection.update( index=es_index, doc_type=es_doc_type, body={'doc': params}, id=params['name'], refresh=True, ) return True