def delete(self, *args, **kwargs):
    """Delete this resource from the database, the filesystem and the search indexes."""
    from wirecloud.catalogue.utils import wgt_deployer

    # Preserve the id a bit longer so CatalogueResource methods called below
    # can still reference the (already deleted) row
    old_id = self.id
    super(CatalogueResource, self).delete(*args, **kwargs)
    self.id = old_id

    # Undeploy the resource from the filesystem (best effort: a missing or
    # corrupted deployment must not block deletion)
    try:
        wgt_deployer.undeploy(self.vendor, self.short_name, self.version)
    except Exception:
        # TODO log this error
        pass  # ignore errors

    # Remove cache for this resource
    self.invalidate_cache()

    # Remove document from search indexes (best effort as well)
    try:
        with get_search_engine('resource').get_batch_writer() as writer:
            writer.delete_by_term('pk', '%s' % old_id)
    except Exception:
        pass  # ignore errors

    # Remove id attribute definitely
    self.id = None
def test_basic_search_with_querytext_multireader_empty(self):
    # Check WireCloud is not affected by bug #415 of Whoosh
    # Force whoosh to use a MultiReader instance
    engine = get_search_engine('resource')
    document = {
        'pk': '1000',
        'vendor_name': 'Wirecloud/new',
        'vendor': 'Wirecloud',
        'name': 'new',
        'version': '1.0',
        'template_uri': 'http://example.com',
        'type': 'widget',
        'creation_date': datetime.utcnow(),
        'public': False,
        'title': 'New',
        'description': 'description',
        'wiring': 'description',
        'image': 'image.png',
        'smartphoneimage': 'smartphoneimage.png',
        'users': '',
        'groups': '',
        'content': 'detailed description',
    }
    with engine.get_batch_writer() as batch_writer:
        batch_writer.add_document(**document)

    self.client.login(username='******', password='******')

    # A query matching nothing must produce an empty, well-formed result page
    response = self.client.get(self.base_url + '?q=totally+uncorrectable+search+giving+an+empty+resultset')
    body = json.loads(response.content.decode('utf-8'))

    self.assertEqual(response.status_code, 200)
    self.assertEqual(body['pagenum'], 1)
    self.assertEqual(body['pagelen'], 0)
    self.assertEqual(body['pagelen'], len(body['results']))
def search(querytext, request, pagenum=1, maxresults=30, staff=False, scope=None, orderby='-creation_date'):
    """Search the resource index.

    If the query returns no hits, a spell-corrected version of the query is
    attempted; when a correction is applied, the corrected text is returned
    in the ``corrected_q`` entry of the result.
    """
    search_engine = get_search_engine('resource')
    search_result = {}

    # Clamp the page number to a sane minimum
    if pagenum < 1:
        pagenum = 1

    with search_engine.searcher() as searcher:
        parser = MultifieldParser(search_engine.default_search_fields, searcher.schema)

        # NOTE: a conditional expression is used here on purpose; the old
        # `cond and a or b` idiom misbehaves when the middle operand is falsy
        user_q = parser.parse(querytext) if querytext else Every()
        user_q, search_kwargs = build_search_kwargs(user_q, request, scope, staff, orderby)
        hits = searcher.search(user_q, limit=(pagenum * maxresults) + 1, **search_kwargs)

        if querytext and hits.is_empty():
            # No results: retry with a spell-corrected query
            correction_q = parser.parse(querytext)
            corrected = searcher.correct_query(correction_q, querytext)

            if corrected.query != correction_q:
                querytext = corrected.string
                search_result['corrected_q'] = querytext
                user_q, search_kwargs = build_search_kwargs(corrected.query, request, scope, staff, orderby)
                hits = searcher.search(user_q, limit=(pagenum * maxresults), **search_kwargs)

        search_engine.prepare_search_response(search_result, hits, pagenum, maxresults)
        search_result['results'] = add_other_versions(searcher, search_result['results'], request.user, staff)
        add_absolute_urls(search_result['results'], request)

    return search_result
def read(self, request):
    """Handle a search GET request, validating the namespace and paging parameters."""
    querytext = request.GET.get('q', '')

    indexname = request.GET.get('namespace', '').strip()
    if indexname == '':
        message = _('Missing namespace GET parameter providing a search namespace')
        return build_error_response(request, 400, message)

    if not is_available(indexname):
        # Interpolate AFTER translating so the literal template can be found
        # in the message catalog
        message = _('Invalid search namespace: %s') % indexname
        return build_error_response(request, 422, message)

    try:
        pagenum = int(request.GET.get('pagenum', '1'))
    except ValueError:
        message = _('Invalid pagenum value: %s') % request.GET['pagenum']
        return build_error_response(request, 422, message)

    try:
        maxresults = int(request.GET.get('maxresults', '30'))
    except ValueError:
        message = _('Invalid maxresults value: %s') % request.GET['maxresults']
        return build_error_response(request, 422, message)

    result = get_search_engine(indexname).search(querytext, request, pagenum=pagenum, maxresults=maxresults)

    return HttpResponse(json.dumps(result, sort_keys=True), status=200, content_type='application/json; charset=utf-8')
def _handle(self, *args, **options):
    """Reset and rebuild the requested search indexes (all of them by default)."""
    self.interactive = options['interactive']
    self.verbosity = int(options.get('verbosity', 1))

    from django.conf import settings
    dirname = settings.WIRECLOUD_INDEX_DIR

    if options['indexes'] == '':
        indexes = [
            search_engine.indexname
            for search_engine in get_available_search_engines()
        ]
    else:
        indexes = options['indexes'].split(',')

    # Fail early if any requested index does not exist
    nonavailable_indexes = [index for index in indexes if not is_available(index)]
    if len(nonavailable_indexes) > 0:
        raise CommandError(self.nonavailable_indexes_message % nonavailable_indexes)

    if os.path.exists(dirname):
        message = ['\n']
        # Interpolate AFTER translating so the literal template can be found
        # in the message catalog
        message.append(
            ugettext(
                'You have requested to reset indexes found in the location\n'
                'specified in your settings:\n\n'
                '    %s\n\n') % dirname)
        message.append(ugettext('This will DELETE EXISTING FILES!\n'))
        message.append(
            ugettext('Are you sure you want to do this?\n\n'
                     "Type 'yes' to continue, or 'no' to cancel: "))
        if self.interactive and input(''.join(message)) != 'yes':
            raise CommandError(_("Reset search indexes cancelled."))
    else:
        os.mkdir(dirname)

    for indexname in indexes:
        self.log(self.update_start_message % indexname)
        search_engine = get_search_engine(indexname)
        search_engine.clear_index()
        for resource in search_engine.get_model().objects.all():
            self.log('    ' + _('Adding %s\n') % resource)
            search_engine.add_resource(resource)
        self.log(self.update_success_message % indexname)
def search(querytext, request, pagenum=1, maxresults=30, staff=False, scope=None, orderby='-creation_date'):
    """Search the resource index, retrying with a spell-corrected query on no hits."""
    search_engine = get_search_engine('resource')
    search_result = {}

    # Clamp the page number to a sane minimum
    if pagenum < 1:
        pagenum = 1

    with search_engine.searcher() as searcher:
        fieldnames = ['description', 'vendor', 'title', 'wiring']
        query_p = QueryParser('content', searcher.schema)
        multif_p = MultifieldParser(fieldnames, searcher.schema)

        # NOTE: a conditional expression is used here on purpose; the old
        # `cond and a or b` idiom misbehaves when the middle operand is falsy
        user_q = query_p.parse(querytext) if querytext else Every()
        user_q, search_kwargs = build_search_kwargs(user_q, request, scope, staff, orderby)
        hits = searcher.search(user_q, limit=(pagenum * maxresults) + 1, **search_kwargs)

        if querytext and hits.is_empty():
            # No results: retry with a spell-corrected query
            patch_expand_prefix(searcher)
            correction_q = multif_p.parse(querytext)
            corrected = searcher.correct_query(correction_q, querytext)

            if corrected.query != correction_q:
                querytext = corrected.string
                search_result['corrected_q'] = querytext
                user_q = query_p.parse(querytext)
                user_q, search_kwargs = build_search_kwargs(
                    user_q, request, scope, staff, orderby)
                hits = searcher.search(user_q, limit=(pagenum * maxresults), **search_kwargs)

        search_engine.prepare_search_response(search_result, hits, pagenum, maxresults)
        search_result['results'] = add_other_versions(searcher, search_result['results'], request.user, staff)
        add_absolute_urls(search_result['results'], request)

    return search_result
def _handle_noargs(self, **options):
    """Reset and rebuild the requested search indexes (all of them by default)."""
    self.interactive = options['interactive']
    self.verbosity = int(options.get('verbosity', 1))

    from django.conf import settings
    index_dir = settings.WIRECLOUD_INDEX_DIR

    if options['indexes'] == '':
        selected_indexes = [engine.indexname for engine in get_available_search_engines()]
    else:
        selected_indexes = options['indexes'].split(',')

    # Fail early if any requested index does not exist
    missing = [name for name in selected_indexes if not is_available(name)]
    if len(missing) > 0:
        raise CommandError(self.nonavailable_indexes_message % missing)

    if os.path.exists(index_dir):
        prompt = ['\n']
        prompt.append(
            'You have requested to reset indexes found in the location\n'
            'specified in your settings:\n\n'
            '    %s\n\n' % index_dir
        )
        prompt.append('This will DELETE EXISTING FILES!\n')
        prompt.append(
            'Are you sure you want to do this?\n\n'
            "Type 'yes' to continue, or 'no' to cancel: "
        )
        if self.interactive and input(''.join(prompt)) != 'yes':
            raise CommandError("Reset search indexes cancelled.")
    else:
        os.mkdir(index_dir)

    for name in selected_indexes:
        self.log(self.update_start_message % name)
        engine = get_search_engine(name)
        engine.clear_index()
        for resource in engine.get_model().objects.all():
            self.log('    ' + _('Adding %s\n') % resource)
            engine.add_resource(resource)
        self.log(self.update_success_message % name)
def suggest(request, prefix='', limit=30):
    """Suggest search terms starting with *prefix*.

    Aggregates the most frequent terms of the title, vendor and description
    fields of the resource index and returns up to *limit* of them, ordered
    by decreasing accumulated frequency.
    """
    from collections import Counter

    reader = get_search_engine('resource').open_index().reader()

    # Sum the frequency of each term across the three fields
    frequent_terms = Counter()
    for fieldname in ['title', 'vendor', 'description']:
        for frequency, term in reader.most_frequent_terms(fieldname, limit, prefix):
            frequent_terms[term] += frequency

    # Terms come back as bytes from the index reader; decode for the response.
    # most_common() sorts by descending count (stable for ties), matching the
    # previous manual sorted(..., key=itemgetter(1), reverse=True) behavior.
    return [term.decode('utf-8') for term, frequency in frequent_terms.most_common(limit)]
def read(self, request):
    """Handle a search GET request, validating the requested namespace."""
    querytext = request.GET.get('q', '')

    indexname = request.GET.get('namespace', '').strip()
    if indexname == '':
        message = _('Missing namespace GET parameter providing a search namespace')
        return build_error_response(request, 400, message)

    if not is_available(indexname):
        # Interpolate AFTER translating so the literal template can be found
        # in the message catalog
        message = _('Invalid search namespace: %s') % indexname
        return build_error_response(request, 422, message)

    result = get_search_engine(indexname).search(querytext)

    return HttpResponse(json.dumps(result, ensure_ascii=False), status=200, content_type='application/json; charset=utf-8')
def read(self, request):
    """Handle a search GET request, validating the requested namespace."""
    querytext = request.GET.get('q', '')

    indexname = request.GET.get('namespace', '').strip()
    if indexname == '':
        message = _('Missing namespace GET parameter providing a search namespace')
        return build_error_response(request, 400, message)

    if not is_available(indexname):
        # Interpolate AFTER translating so the literal template can be found
        # in the message catalog
        message = _('Invalid search namespace: %s') % indexname
        return build_error_response(request, 422, message)

    result = get_search_engine(indexname).search(querytext)

    return HttpResponse(json.dumps(result), status=200, content_type='application/json; charset=utf-8')
def search(querytext, request, pagenum=1, maxresults=30, staff=False, scope=None, orderby='-creation_date'):
    """Search the resource index, retrying with a spell-corrected query on no hits."""
    search_engine = get_search_engine('resource')
    search_result = {}

    # Clamp the page number to a sane minimum
    if pagenum < 1:
        pagenum = 1

    with search_engine.searcher() as searcher:
        fieldnames = ['description', 'vendor', 'title', 'wiring']
        query_p = QueryParser('content', searcher.schema)
        multif_p = MultifieldParser(fieldnames, searcher.schema)

        # NOTE: a conditional expression is used here on purpose; the old
        # `cond and a or b` idiom misbehaves when the middle operand is falsy
        user_q = query_p.parse(querytext) if querytext else Every()
        user_q, search_kwargs = build_search_kwargs(user_q, request, scope, staff, orderby)
        hits = searcher.search(user_q, limit=(pagenum * maxresults) + 1, **search_kwargs)

        if querytext and hits.is_empty():
            # TODO currently searches from BufferedWriters give problems when correcting queries
            with search_engine.open_index().searcher() as corrector:
                correction_q = multif_p.parse(querytext)
                corrected = corrector.correct_query(correction_q, querytext)

                if corrected.query != correction_q:
                    querytext = corrected.string
                    search_result['corrected_q'] = querytext
                    user_q = query_p.parse(querytext)
                    user_q, search_kwargs = build_search_kwargs(user_q, request, scope, staff, orderby)
                    hits = searcher.search(user_q, limit=(pagenum * maxresults), **search_kwargs)

        search_page(search_result, hits, pagenum, maxresults)
        search_result['results'] = add_other_versions(searcher, search_result['results'], request.user, staff)
        add_absolute_urls(search_result['results'], request)

    return search_result
def update_group_index(sender, instance, created, **kwargs):
    """Signal handler keeping the 'group' search index in sync with the model."""
    engine = get_search_engine('group')
    engine.add_resource(instance, created)
def update_catalogue_index(sender, instance, created, **kwargs):
    """Signal handler adding/updating *instance* in the 'resource' search index.

    Indexing failures must not abort the model save, so errors are logged and
    swallowed (best effort).
    """
    try:
        get_search_engine('resource').add_resource(instance, created)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; lazy %-args avoid formatting when the record is filtered
        logger.warning("Error adding %s into the catalogue search index", instance.local_uri_part)
def update_workspace_index(sender, instance, created, **kwargs):
    """Signal handler adding/updating *instance* in the 'workspace' search index.

    Indexing failures must not abort the model save, so errors are logged and
    swallowed (best effort).
    """
    try:
        get_search_engine('workspace').add_resource(instance, created)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; lazy %-args avoid formatting when the record is filtered
        logger.warning("Error adding %s into the workspace search index", instance)
def update_user_index(sender, instance, created, **kwargs):
    """Signal handler keeping the 'user' search index in sync with the model."""
    engine = get_search_engine('user')
    engine.add_resource(instance, created)