def test_saving_and_retrieving_searches(self):
    # Persist two Search rows, then confirm they come back from the ORM
    # in insertion order with their keywords intact.
    for term in ('chicken soup', 'cookies'):
        search = Search()
        search.keyword = term
        search.save()

    stored = Search.objects.all()
    self.assertEqual(stored.count(), 2)
    self.assertEqual(stored[0].keyword, 'chicken soup')
    self.assertEqual(stored[1].keyword, 'cookies')
def _document_matches_options(document, s_options):
    """Return True when *document* passes every filter supplied in *s_options*.

    Extracted from the (previously duplicated) inline filter chain: each
    option is checked only when present, and the first failing check
    rejects the document.
    """
    if s_options.get('document_status'):
        # NOTE(review): argument order differs from the later in_document
        # calls; preserved as-is — confirm whether in_document is symmetric.
        if not in_document(document.doc_status, s_options.get('document_status')):
            return False
    if s_options.get('document_type'):
        if not in_document(document.doc_kind, s_options.get('document_type')):
            return False
    if s_options.get('brief_document_description'):
        if not in_document(s_options.get('brief_document_description'),
                           document.doc_mark):
            return False
    if s_options.get('name_en'):
        if not in_document(s_options.get('name_en'), document.doc_name_en):
            return False
    if s_options.get('abstract'):
        if not in_document(s_options.get('abstract'), document.doc_annotation):
            return False
    if s_options.get('note'):
        if not in_document(s_options.get('note'), document.doc_comment):
            return False
    if s_options.get('full_designation_of_the_document'):
        if not in_document(s_options.get('full_designation_of_the_document'),
                           document.doc_full_mark):
            return False
    if s_options.get('okved'):
        try:
            code = CodeOKVED.objects.filter(title=s_options.get('okved'))
            # NOTE(review): this compares a model field to a QuerySet, which
            # is likely always unequal; kept byte-for-byte to preserve
            # existing behavior — confirm the intended comparison.
            if document.classifier_okved != code:
                return False
        except Exception as ex:
            LOGGER.error(ex)
            return False
    if s_options.get('oks'):
        try:
            code = CodeOKS.objects.filter(title=s_options.get('oks'))
            if document.classifier_oks != code:
                return False
        except Exception as ex:
            LOGGER.error(ex)
            return False
    if s_options.get('tk'):
        if not in_document(s_options.get('tk'), document.tk_rus):
            return False
    if s_options.get('mtk'):
        if not in_document(s_options.get('mtk'), document.mtk_dev):
            return False
    if s_options.get('keywords'):
        if not in_document(s_options.get('keywords'), document.keywords):
            return False
    if s_options.get('date_of_adoption'):
        if str(document.doc_assign_date) != s_options.get('date_of_adoption'):
            return False
    if s_options.get('effective_date'):
        if str(document.doc_effective_date) != s_options.get('effective_date'):
            return False
    if s_options.get('recover_date'):
        if str(document.doc_restoration_date) != s_options.get('recover_date'):
            return False
    return True


def simple_search_text(search_serializer, page, page_size, user_id):
    """Run a document search and return one page of results as a JsonResponse.

    Args:
        search_serializer: serializer whose ``.data`` supplies
            'search_text', 'page_size' and 'search_options'.
        page: 1-based page number.
        page_size: requested page size; -1 means "read it from the
            serializer"; values outside 1..10 (or falsy) collapse to 10.
        user_id: id recorded on the SearchHistory audit row.

    Returns:
        JsonResponse with 'documents' (list of serialized documents) and
        'count'.
    """
    s_text = search_serializer.data['search_text']
    if s_text in stopwords:
        return JsonResponse({'documents': [], 'count': 0})

    # Resolve the effective page size.
    if page_size == -1:
        str_page_size = search_serializer.data['page_size']
        if str_page_size:
            page_size = int(str_page_size)
    if page_size:
        if page_size < 1 or page_size > 10:
            page_size = 10
    else:
        page_size = 10
    top_limit = page_size * page

    # Audit trail: persist the query text and tie it to the requesting user.
    search = Search()
    search.search_text = s_text
    search.save()
    search_history = SearchHistory()
    search_history.userid = user_id
    search_history.search = search
    search_history.save()

    doc_set = set()
    doc = []
    str_s_options = search_serializer.data['search_options']
    search_options_serializer = SearchOptionsSerializer(data=str_s_options)
    search_options_serializer.is_valid()
    s_options = search_options_serializer.data
    result = []

    # 'name_ru' is folded into the free-text query rather than used as a filter.
    if s_options:
        if s_options.get('name_ru'):
            if s_text:
                if s_text != s_options.get('name_ru'):
                    s_text = s_text + " " + s_options.get('name_ru')
            else:
                s_text = s_options.get('name_ru')

    if s_text:
        if SEARCH_QUERY:
            # Phase 1: exact phrase matches from the search index.
            serched = SEARCH_QUERY.phrase_query(s_text.lower())
            for doc_id in serched:
                doc_set.add(doc_id)
            for docId in doc_set:
                document = Documents.objects.get(pk=docId)
                if s_options:
                    if not _document_matches_options(document, s_options):
                        continue
                    result.append(DocumentsSerializer(document).data)
                else:
                    # Without explicit options only 'actual' documents qualify.
                    if document.doc_status == 'actual':
                        result.append(DocumentsSerializer(document).data)
                if len(result) == top_limit:
                    return JsonResponse({
                        'documents': result[(page - 1) * page_size:],
                        'count': len(result)
                    })

            # Phase 2: top up the page with free-text matches.
            count = len(result)
            limit_for_free_search = top_limit - len(result)
            doc_set = set()
            if limit_for_free_search > 0:
                free_serched = SEARCH_QUERY.free_text_query(s_text.lower())
                count = count + len(free_serched)
                free_serched = free_serched[:limit_for_free_search]
                for free_doc_id in free_serched:
                    doc_set.add(free_doc_id)
            result_advanced = []
            for docId in doc_set:
                document = Documents.objects.get(pk=docId)
                if s_options:
                    if not _document_matches_options(document, s_options):
                        continue
                    result_advanced.append(DocumentsSerializer(document).data)
                else:
                    if document.doc_status == 'actual':
                        result_advanced.append(DocumentsSerializer(document).data)
            result = result + result_advanced
            if len(result) < (page - 1) * page_size:
                return JsonResponse({'documents': [], 'count': 0})
            return JsonResponse({
                'documents': result[(page - 1) * page_size:],
                'count': count
            })
    else:
        # No search text: fall back to per-field queryset filters. Each
        # filter appends a queryset to `doc` until top_limit is reached.
        if s_options:
            if s_options.get('document_status'):
                try:
                    # BUGFIX: previously filtered doc_kind__contains
                    # (copy-pasted from the document_type filter); the
                    # status lives in doc_status.
                    doc.append(Documents.objects.filter(
                        doc_status__contains=s_options.get('document_status')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('document_type') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_kind__contains=s_options.get('document_type')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get(
                    'brief_document_description') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_mark__contains=s_options.get(
                            'brief_document_description')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('name_en') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_name_en__contains=s_options.get('name_en')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('abstract') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_annotation__contains=s_options.get('abstract')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('note') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_comment__contains=s_options.get('note')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('full_designation_of_the_document'
                             ) and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        doc_full_mark__contains=s_options.get(
                            'full_designation_of_the_document')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('okved') and len(doc) < top_limit:
                try:
                    code = CodeOKVED.objects.filter(
                        title=s_options.get('okved'))
                    for c in code:
                        doc.append(
                            Documents.objects.filter(classifier_okved=c))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('oks') and len(doc) < top_limit:
                try:
                    code = CodeOKS.objects.filter(
                        code__contains=s_options.get('oks'))
                    for c in code:
                        doc.append(Documents.objects.filter(classifier_oks=c))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('tk') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        tk_rus__contains=s_options.get('tk')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('mtk') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        mtk_dev__contains=s_options.get('mtk')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('keywords') and len(doc) < top_limit:
                try:
                    doc.append(Documents.objects.filter(
                        keywords__contains=s_options.get('keywords')))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('date_of_adoption') and len(doc) < top_limit:
                try:
                    search_date = datetime.strptime(
                        s_options.get('date_of_adoption'), '%Y-%m-%d')
                    doc.append(
                        Documents.objects.filter(doc_assign_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('effective_date') and len(doc) < top_limit:
                try:
                    search_date = datetime.strptime(
                        s_options.get('effective_date'), '%Y-%m-%d')
                    doc.append(Documents.objects.filter(
                        doc_effective_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if s_options.get('recover_date') and len(doc) < top_limit:
                try:
                    # BUGFIX: was s_options.data['recover_date']; s_options
                    # is a plain dict, so .data raised AttributeError and
                    # the blanket except silently disabled this filter.
                    search_date = datetime.strptime(
                        s_options.get('recover_date'), '%Y-%m-%d')
                    doc.append(Documents.objects.filter(
                        doc_restoration_date=search_date))
                except Exception as ex:
                    LOGGER.error(ex)
            if len(doc) < page * page_size:
                # NOTE(review): len(doc) counts querysets, not documents —
                # pagination here is per-queryset; confirm that is intended.
                # 'count' added for a response shape consistent with the
                # other branches (previously omitted).
                return JsonResponse({'documents': [], 'count': 0})
            doc_list = doc[(page - 1) * page_size:page * page_size]
            for d in doc_list:
                for i in range(len(d)):
                    doc_set.add(d[i].pk)
            for docId in doc_set:
                ds = DocumentsSerializer(Documents.objects.get(pk=docId))
                result.append(ds.data)
            return JsonResponse({'documents': result, 'count': len(doc)})
    return JsonResponse({'documents': [], 'count': 0})
def add(request):
    """Handle creation of a new search from a submitted AddForm."""
    user = request.user.username
    first = request.user.first_name
    last = request.user.last_name

    if request.method == 'POST':
        form = AddForm(request.POST)
        if form.is_valid():
            word = form.cleaned_data['word']
            domain = form.cleaned_data['domain']

            # Ensure the expression exists exactly once.
            Word.objects.get_or_create(expression=word)

            # Reuse the first crontab slot with spare capacity; otherwise
            # open a new one and shift existing non-zero priorities down.
            try:
                cron_slot = Crontab.objects.filter(has_reached_limit=False)[0]
            except IndexError:
                if Crontab.objects.count() == 0:
                    cron_slot = Crontab(number_of_searches=0, priority=0)
                else:
                    cron_slot = Crontab(number_of_searches=0, priority=1)
                for existing in Crontab.objects.all():
                    if existing.priority != 0:
                        existing.priority = existing.priority + 1
                        existing.save()
                cron_slot.save()

            new_search = Search(name=word, words=word, cron=cron_slot)
            new_search.save()

            # Track slot usage; 100 searches fills a slot.
            cron_slot.number_of_searches = cron_slot.number_of_searches + 1
            if cron_slot.number_of_searches == 100:
                cron_slot.has_reached_limit = True
            cron_slot.save()

            domain_qs = Domain.objects.filter(id=domain)
            new_search.domains.add(domain_qs.get(pk=domain))
            return HttpResponseRedirect('../view/')
    else:
        form = AddForm()

    # Invalid POSTs fall through here with the bound form so errors render.
    return render_to_response(
        'add.html',
        {'form': form, 'user': user, 'first': first, 'last': last},
        context_instance=RequestContext(request),
    )