def create(cls, user, name, private):
    """Create and persist a new course owned by *user*.

    PRECONDITION: user is a teacher.

    Returns the saved course entity on success, or a ``(field, message)``
    tuple describing a client-side validation error.
    """
    if not is_title(name):
        return 'name', 'Empty Course Name'  # client side error
    name = make_title(name)
    # Reject duplicates: course names are globally unique.
    existing = cls.all().filter('name =', name).get()
    if existing is not None:
        return 'name', 'Course name is already taken'
    # Private courses get a random 16-hex-digit access code; public ones none.
    access_code = os.urandom(8).encode('hex') if private else None
    course = cls(name=name, teacher=user, code=access_code)
    course.save()
    # Register the course name with the search index.
    Search.add_words(course.name, course.pk, COURSE_TABLE)
    return course
def index(request):
    """Search landing/results view.

    Without a ``search`` GET parameter, renders the index page with the ten
    most recent searches. With one, serves cached song ids from a matching
    Search row (or fetches and caches them via storeSongs when the row is
    missing or a forced refresh ``f`` parameter is present) and renders the
    results page.
    """
    context = {'search_list': Search.objects.all()[:10]}
    if request.method == 'GET' and 'search' in request.GET:
        search_input = request.GET['search']
        context['search_input'] = search_input
        try:
            search = Search.objects.get(title=search_input)
        except Search.DoesNotExist:
            # First time this term is searched: fetch, then cache the ids.
            (context['search_results'],
             context['search_input'],
             song_ids) = storeSongs(search_input)
            Search(title=context['search_input'],
                   songs=','.join(map(str, song_ids))).save()
        else:
            if 'f' in request.GET:
                # Forced refresh: re-fetch and overwrite the cached ids.
                (context['search_results'],
                 context['search_input'],
                 song_ids) = storeSongs(search_input)
                search.songs = ','.join(map(str, song_ids))
                search.save()
            else:
                # Serve results straight from the cached id list.
                song_ids = [int(pk) for pk in search.songs.split(',')]
                context['search_results'] = [
                    Song.objects.get(id=pk) for pk in song_ids
                ]
        return render(request, 'search/results.html', context=context)
    return render(request, 'search/index.html', context=context)
def setUpTestData(cls):
    """Build shared fixtures: two users, two projects, and four issues.

    Only ``cls.user`` is a developer on both projects; ``cls.user2`` is a
    developer on project2 only, which the search tests use to check
    per-project visibility.
    """
    # NOTE: if you modify these elements they need to be created in setUp(), instead of here
    cls.user = get_user_model().objects.create_user('a', 'b', 'c')
    cls.user2 = get_user_model().objects.create_user('d', 'e', 'f')
    cls.project = Project(creator=cls.user, name_short='PRJ')
    cls.project.save()
    cls.project.developer.add(cls.user)
    cls.project2 = Project(creator=cls.user, name_short='PROJ')
    cls.project2.save()
    cls.project2.developer.add(cls.user)
    cls.project2.developer.add(cls.user2)
    # create test data (They are needed for search-purpose only)
    cls.issue = Issue(title="Test-Issue", project=cls.project,
                      kanbancol=cls.project.kanbancol.first(), type="Bug")
    cls.issue.save()
    # Unnamed issues: only reachable through search, never referenced directly.
    issue = Issue(title="Blub-Issue", project=cls.project,
                  kanbancol=cls.project.kanbancol.first(), type="Bug")
    issue.save()
    issue = Issue(title="Bla-Issue", project=cls.project,
                  kanbancol=cls.project.kanbancol.first(), type="Task")
    issue.save()
    cls.issuep2 = Issue(title="Bling-Issue", project=cls.project2,
                        kanbancol=cls.project2.kanbancol.first(), type="Task")
    # Fixed past due date so date-based search filters have a stable hit.
    cls.issuep2.due_date = datetime.date(2000, 1, 1)
    cls.issuep2.save()
    # NOTE(review): built with positional args and never save()d — presumably
    # intentional (in-memory search descriptor), but confirm against Search's
    # field order.
    cls.search = Search("description", "*", cls.user)
def test_saving_and_retrieving_searches(self):
    """Saved Search rows come back in insertion order with their keywords."""
    for term in ('chicken soup', 'cookies'):
        entry = Search()
        entry.keyword = term
        entry.save()
    stored = Search.objects.all()
    self.assertEqual(stored.count(), 2)
    self.assertEqual(stored[0].keyword, 'chicken soup')
    self.assertEqual(stored[1].keyword, 'cookies')
def list(self, request, *args, **kwargs):
    """Return full-text search hits for the ``word`` query parameter.

    Responds with ``{"data": []}`` when no ``word`` parameter is supplied.
    """
    params = request.GET.copy()
    results = []
    if "word" in params:
        results = Search().search_text(params["word"])
    # Fixed: removed leftover debug print(response) that spammed stdout on
    # every request.
    return Response({"data": results})
def test_published_is_indexed(published):
    """Re-saving published content updates its search index row in place."""
    # Exactly one index row exists for the fixture and it is publicly visible.
    assert Search.objects.count() == 1
    assert len(list(Search.search(public=True))) == 1
    # The new title is not findable before the edit...
    assert Content.objects.search("Ikinyugunyugu").count() == 0
    published.title = "Ikinyugunyugu"
    published.save()
    # ...and the save re-indexes (no extra row) so the title becomes findable.
    assert Search.objects.count() == 1
    assert Content.objects.search("Ikinyugunyugu").count() == 1
def query(expression, user):
    """Run a search *expression* for *user* and return displayable results.

    Tries the structured query parser first; on any failure falls back to a
    full-text search across all fields. Returns a list of
    ``[linktext, link, objname, relative_project]`` lists, restricted to
    items the user may see.

    Raises ValueError when the parser fails and the expression is shorter
    than 3 characters.

    NOTE(review): relies on module-level ``parser`` state
    (``obj_to_query``, ``sort_by``, ``limit``) — presumably populated by
    ``parser.compile``; confirm it is safe under concurrent requests.
    """
    try:
        expr = parser.compile(expression)
        result = app_list[parser.obj_to_query].objects.filter(
            expr).distinct()
        # order results
        for s in parser.sort_by:
            result = result.order_by(s)
        # save as non-persistent search object
        if Search.objects.filter(searchexpression=expression).count() == 0:
            Search(description="Autosave", searchexpression=expression,
                   creator=user).save()
    except Exception as e:
        # parsing exception, search all fields for expression, case of full-text-search (can't be parsed)
        # full-text search aren't stored for reuse currently
        # if len(expression):
        #     Search(description="Autosave", searchexpression=expression, creator=user).save()
        # skip search if we have less than 3 characters in our expression
        # NOTE(review): this broad except also masks DB errors from the
        # autosave above, silently degrading to full-text search — confirm
        # that is intended.
        if len(expression) < 3:
            raise ValueError()
        result = SearchFrontend.search_all_fields_for(expression)
    # check user permissions
    valid_items = [r for r in result if r.search_allowed_for_user(user)]
    # limit results (-1 means unlimited)
    if parser.limit != -1:
        valid_items = valid_items[:parser.limit]
    # prepare a list of lists containing information about discovered items
    retval = []
    for i in valid_items:
        linktext = i.get_search_title()
        link = i.get_absolute_url()
        objname = i.__class__.__name__
        relative_project = i.get_relative_project()
        retval.append([linktext, link, objname, relative_project])
    return retval
def _passes_search_options(document, s_options):
    """Return True when *document* satisfies every populated search option.

    Extracted from the previously duplicated inline filter chain. Text
    options are matched via in_document(); date options compare the model
    field's string form for exact equality.
    """
    if s_options.get('document_status'):
        if not in_document(document.doc_status,
                           s_options.get('document_status')):
            return False
    if s_options.get('document_type'):
        if not in_document(document.doc_kind,
                           s_options.get('document_type')):
            return False
    if s_options.get('brief_document_description'):
        # NOTE: argument order differs from the status/type checks above —
        # preserved from the original code.
        if not in_document(s_options.get('brief_document_description'),
                           document.doc_mark):
            return False
    if s_options.get('name_en'):
        if not in_document(s_options.get('name_en'), document.doc_name_en):
            return False
    if s_options.get('abstract'):
        if not in_document(s_options.get('abstract'),
                           document.doc_annotation):
            return False
    if s_options.get('note'):
        if not in_document(s_options.get('note'), document.doc_comment):
            return False
    if s_options.get('full_designation_of_the_document'):
        if not in_document(s_options.get('full_designation_of_the_document'),
                           document.doc_full_mark):
            return False
    if s_options.get('okved'):
        try:
            code = CodeOKVED.objects.filter(title=s_options.get('okved'))
            # NOTE(review): comparing a model field against a QuerySet is
            # always unequal, so a populated 'okved' option rejects every
            # document. Preserved as-is; confirm intended semantics.
            if document.classifier_okved != code:
                return False
        except Exception as ex:
            LOGGER.error(ex)
            return False
    if s_options.get('oks'):
        try:
            code = CodeOKS.objects.filter(title=s_options.get('oks'))
            # NOTE(review): same QuerySet-comparison caveat as 'okved'.
            if document.classifier_oks != code:
                return False
        except Exception as ex:
            LOGGER.error(ex)
            return False
    if s_options.get('tk'):
        if not in_document(s_options.get('tk'), document.tk_rus):
            return False
    if s_options.get('mtk'):
        if not in_document(s_options.get('mtk'), document.mtk_dev):
            return False
    if s_options.get('keywords'):
        if not in_document(s_options.get('keywords'), document.keywords):
            return False
    if s_options.get('date_of_adoption'):
        if str(document.doc_assign_date) != s_options.get('date_of_adoption'):
            return False
    if s_options.get('effective_date'):
        if str(document.doc_effective_date) != s_options.get('effective_date'):
            return False
    if s_options.get('recover_date'):
        if str(document.doc_restoration_date) != s_options.get('recover_date'):
            return False
    return True


def simple_search_text(search_serializer, page, page_size, user_id):
    """Paged document search combining phrase, free-text and field filters.

    Records the query in Search/SearchHistory, then:
    * with search text: phrase-query the index, filter hits by the advanced
      options, top up with free-text hits until ``page_size * page``;
    * without search text: build QuerySets per populated option and page
      over those filter groups.

    Returns a JsonResponse of ``{'documents': [...], 'count': N}``.

    Fixes over the original:
    * ``recover_date`` read ``s_options.data[...]`` — ``s_options`` is a
      plain dict, so that raised AttributeError (silently logged) and the
      filter never applied;
    * the no-text ``document_status`` branch filtered ``doc_kind``
      (copy-paste from ``document_type``) instead of ``doc_status``;
    * one response omitted the ``'count'`` key.
    """
    s_text = search_serializer.data['search_text']
    if s_text in stopwords:
        return JsonResponse({'documents': [], 'count': 0})
    # -1 means "take the page size from the request", clamped to 1..10.
    if page_size == -1:
        str_page_size = search_serializer.data['page_size']
        if str_page_size:
            page_size = int(str_page_size)
        if page_size:
            if page_size < 1 or page_size > 10:
                page_size = 10
        else:
            page_size = 10
    top_limit = page_size * page

    # Record the query for history/analytics.
    search = Search()
    search.search_text = s_text
    search.save()
    search_history = SearchHistory()
    search_history.userid = user_id
    search_history.search = search
    search_history.save()

    doc_set = set()
    doc = []
    str_s_options = search_serializer.data['search_options']
    search_options_serializer = SearchOptionsSerializer(data=str_s_options)
    search_options_serializer.is_valid()
    s_options = search_options_serializer.data
    result = []
    # Fold the Russian-name option into the free-text query string.
    if s_options:
        if s_options.get('name_ru'):
            if s_text:
                if s_text != s_options.get('name_ru'):
                    s_text = s_text + " " + s_options.get('name_ru')
            else:
                s_text = s_options.get('name_ru')

    if s_text:
        # Phase 1: exact phrase hits from the search index.
        if SEARCH_QUERY:
            for doc_id in SEARCH_QUERY.phrase_query(s_text.lower()):
                doc_set.add(doc_id)
        for doc_pk in doc_set:
            document = Documents.objects.get(pk=doc_pk)
            if s_options:
                if not _passes_search_options(document, s_options):
                    continue
                result.append(DocumentsSerializer(document).data)
            elif document.doc_status == 'actual':
                result.append(DocumentsSerializer(document).data)
            if len(result) == top_limit:
                return JsonResponse({
                    'documents': result[(page - 1) * page_size:],
                    'count': len(result)
                })
        # Phase 2: top up with free-text hits until the page limit.
        count = len(result)
        limit_for_free_search = top_limit - len(result)
        doc_set = set()
        if limit_for_free_search > 0:
            free_serched = SEARCH_QUERY.free_text_query(s_text.lower())
            count = count + len(free_serched)
            free_serched = free_serched[:limit_for_free_search]
            for free_doc_id in free_serched:
                doc_set.add(free_doc_id)
        result_advanced = []
        for doc_pk in doc_set:
            document = Documents.objects.get(pk=doc_pk)
            if s_options:
                if not _passes_search_options(document, s_options):
                    continue
                result_advanced.append(DocumentsSerializer(document).data)
            elif document.doc_status == 'actual':
                result_advanced.append(DocumentsSerializer(document).data)
        result = result + result_advanced
        if len(result) < (page - 1) * page_size:
            return JsonResponse({'documents': [], 'count': 0})
        else:
            return JsonResponse({
                'documents': result[(page - 1) * page_size:],
                'count': count
            })
    else:
        # No free-text query: build a QuerySet per populated option.
        if s_options:
            if s_options.get('document_status'):
                try:
                    # FIX: was doc_kind__contains (copy-paste from the
                    # document_type branch below).
                    doc.append(Documents.objects.filter(
                        doc_status__contains=s_options.get('document_status')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('document_type') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_kind__contains=s_options.get('document_type')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get(
                    'brief_document_description') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_mark__contains=s_options.get(
                            'brief_document_description')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('name_en') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_name_en__contains=s_options.get('name_en')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('abstract') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_annotation__contains=s_options.get('abstract')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('note') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_comment__contains=s_options.get('note')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('full_designation_of_the_document'
                             ) and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_full_mark__contains=s_options.get(
                            'full_designation_of_the_document')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('okved') and len(doc) < top_limit:
                try:
                    code = CodeOKVED.objects.filter(
                        title=s_options.get('okved'))
                    for c in code:
                        doc.append(
                            Documents.objects.filter(classifier_okved=c))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('oks') and len(doc) < top_limit:
                try:
                    code = CodeOKS.objects.filter(
                        code__contains=s_options.get('oks'))
                    for c in code:
                        doc.append(Documents.objects.filter(classifier_oks=c))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('tk') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        tk_rus__contains=s_options.get('tk')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('mtk') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        mtk_dev__contains=s_options.get('mtk')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('keywords') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        keywords__contains=s_options.get('keywords')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('date_of_adoption') and len(doc) < top_limit:
                try:
                    search_date = datetime.strptime(
                        s_options.get('date_of_adoption'), '%Y-%m-%d')
                    doc.append(
                        Documents.objects.filter(doc_assign_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('effective_date') and len(doc) < top_limit:
                try:
                    search_date = datetime.strptime(
                        s_options.get('effective_date'), '%Y-%m-%d')
                    doc.append(Documents.objects.filter(
                        doc_effective_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('recover_date') and len(doc) < top_limit:
                try:
                    # FIX: was s_options.data['recover_date'] — s_options is
                    # a dict, so this always raised and the filter never ran.
                    search_date = datetime.strptime(
                        s_options.get('recover_date'), '%Y-%m-%d')
                    doc.append(Documents.objects.filter(
                        doc_restoration_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if len(doc) < page * page_size:
                # FIX: include 'count' for a consistent response shape.
                return JsonResponse({'documents': [], 'count': 0})
            # NOTE(review): `doc` holds QuerySets, so this pages over filter
            # groups rather than individual documents — preserved as-is.
            doc_list = doc[(page - 1) * page_size:page * page_size]
            for d in doc_list:
                for i in range(len(d)):
                    doc_set.add(d[i].pk)
            for doc_pk in doc_set:
                result.append(
                    DocumentsSerializer(Documents.objects.get(pk=doc_pk)).data)
            return JsonResponse({'documents': result, 'count': len(doc)})
    return JsonResponse({'documents': [], 'count': 0})
def add(request):
    """Handle the add of a new search.

    On a valid POST: records the search word, assigns the search to the
    first crontab slot with remaining capacity (creating and re-prioritising
    slots when all are full), links the chosen domain, and redirects to the
    list view. Otherwise renders the add form.
    """
    user = request.user.username
    first = request.user.first_name
    last = request.user.last_name
    if request.method == 'POST':
        form = AddForm(request.POST)
        if form.is_valid():
            word = form.cleaned_data['word']
            domain = form.cleaned_data['domain']
            Word.objects.get_or_create(expression=word)
            # Reuse the first crontab slot that still has capacity; if none,
            # open a new slot and push every existing non-zero priority down.
            try:
                _cron = Crontab.objects.filter(has_reached_limit=False)[0]
            except IndexError:
                if Crontab.objects.count() == 0:
                    _cron = Crontab(number_of_searches=0, priority=0)
                else:
                    _cron = Crontab(number_of_searches=0, priority=1)
                    for cron in Crontab.objects.all():
                        if cron.priority != 0:
                            cron.priority = cron.priority + 1
                            cron.save()
                _cron.save()
            res = Search(name=word, words=word, cron=_cron)
            res.save()
            _cron.number_of_searches = _cron.number_of_searches + 1
            # Fixed: >= instead of == so an already-overfull slot still gets
            # capped (the old equality check could miss the threshold).
            if _cron.number_of_searches >= 100:
                _cron.has_reached_limit = True
            _cron.save()
            # Fixed: single lookup instead of the redundant
            # filter(id=domain) followed by .get(pk=domain).
            res.domains.add(Domain.objects.get(pk=domain))
            return HttpResponseRedirect('../view/')
    else:
        form = AddForm()
    return render_to_response(
        'add.html',
        {'form': form, 'user': user, 'first': first, 'last': last, },
        context_instance=RequestContext(request)
    )
def test_deleted_is_indexed(deleted):
    """Deleted content keeps its index row but is hidden from public search."""
    assert Search.objects.count() == 1
    assert len(list(Search.search(public=True))) == 0