def menu(db, cursor):
    try:
        print("---Welcome to the CRUD Program---\n"
              "1. INSERT Data\n"
              "2. READ Data\n"
              "3. UPDATE Data\n"
              "4. DELETE Data\n"
              "5. SEARCH Data\n"
              "6. Update Admin Data\n"
              "Press CTRL-C to exit the program")
        pil = input("Enter your choice: ")
        if pil == '1':
            ins.insert(db, cursor)
        elif pil == '2':
            rd.read(db, cursor)
        elif pil == '3':
            up.update(db, cursor)
        elif pil == '4':
            delt.delete(db, cursor)
        elif pil == '5':
            sr.search(db, cursor)
        elif pil == '6':
            system('python update-admin.py')
        else:
            print("Invalid choice")
    except KeyboardInterrupt:
        print("\nThank you :)\nExiting")
        time.sleep(0.3)
        cs.sys.exit()
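# A minimal driver sketch for menu() above. Assumptions: the original uses a
# MySQL-style DB-API driver, and the module aliases it relies on (ins, rd, up,
# delt, sr, cs, time) are imported at module level; the connection parameters
# and database name below are placeholders, not taken from the original.
import mysql.connector

db = mysql.connector.connect(host="localhost", user="root",
                             password="", database="crud_db")
cursor = db.cursor()
while True:  # menu() catches KeyboardInterrupt and exits the process itself
    menu(db, cursor)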
def analyse(path):
    print prettyText("[*] Parsing Project at %s ..." % path, 'blue')
    project = parser.PHPProject(path)
    print prettyText("[*] Parsing Completed!", 'blue')
    print prettyText("[*] Searching for dangerous method inheritance", 'blue')
    files = search(project, classFilter, phply.phpast.Function)
    for name in files:
        print prettyText('[*] File: %s' % name, ['yellow', 'bold'])
        for func in files[name]:
            # every formal parameter starts out fully tainted ("ANY")
            params = search(func, classFilter, phply.phpast.FormalParameter)
            functionInputParams = dict((param, "ANY") for param in params)
            tst = generateTST(func, functionInputParams)
            # look for calls to known-dangerous methods and check whether
            # taint propagated into their sensitive argument positions
            for kcat in vulndb.A_F_ALL.keys():
                cat = vulndb.A_F_ALL[kcat]
                for method in cat:
                    for call in search(func, functionFilter, method):
                        for pos in cat[method]:
                            try:
                                if tst.getTaint(call.params[pos].node) > 0:
                                    print '-' * 5
                                    print prettyText('[*] Category: %s' % kcat, 'red')
                                    print prettyText('[*] Method: %s' % str(method), 'blue')
                                    print prettyText("[+] FOUND: %s" % str(func.name), 'green')
                                    print '-' * 5
                            except (IndexError, AttributeError):
                                print prettyText('[!] ERROR: %s' % str(call), 'red')
def searchMethod(path, method):
    print prettyText("[*] Parsing Project at %s ..." % path, 'blue')
    p = parser.PHPProject(path)
    print prettyText("[*] Parsing Completed!", 'blue')
    print prettyText("[***] Method: %s" % str(method), ['yellow'])
    found = search(p, functionClassFilter, method)
    print parseFound(found, "Custom")
def event_filtering_helper(category, request):
    if category == "past":
        events_sqs = get_past_events_sqs(request.user)
    elif category == "cancelled":
        events_sqs = get_cancelled_events_sqs(request.user)
    elif category == "archived":
        events_sqs = get_archived_events_sqs(request.user)
    elif category == "attended":
        events_sqs = get_attended_events_sqs(request.user)
    else:
        events_sqs = get_upcoming_events_sqs(request.user)
    events_exist = len(events_sqs) > 0
    event_type = request.GET.get("type", ALL)  # renamed from `type` to avoid shadowing the builtin
    if event_type == "e":
        events_sqs = events_sqs.filter(is_deadline=False)
    elif event_type == "d":
        events_sqs = events_sqs.filter(is_deadline=True)
    elif event_type == "r":
        events_sqs = events_sqs.filter(is_drop=True)
    if request.GET.get('query'):
        events_sqs = search(events_sqs, request.GET.get('query'))
    if category == "upcoming":
        return get_categorized_events_context(events_exist, events_sqs, request.user)
    else:
        event_objects = [sr.object for sr in events_sqs.load_all()]
        return events_exist, map(event_map, event_objects, [request.user] * len(event_objects))
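# Caller sketch: note the helper returns a context dict for "upcoming" but an
# (events_exist, mapped_events) tuple for every other category. The view and
# template names below are illustrative, not from the original.
from django.shortcuts import render

def past_events(request):
    events_exist, events = event_filtering_helper("past", request)
    return render(request, "past_events.html",
                  {"events_exist": events_exist, "events": events})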
def do(self):
    if not self.check_command():
        print('Command not found')
        return '\n'
    if self.expr[0] == 'sort':
        from core.sort import sort
        action = sort(self.expr)
        print(action.parse())
    elif self.expr[0] == 'cat':
        from core.cat import cat
        action = cat(self.expr)
        print(action.parse())
    elif self.expr[0] == 'search':
        from core.search import search
        action = search(self.expr)
        print(action.parse())
    elif self.expr[0] == 'list':
        from core.listall import list_all
        action = list_all(self.expr)
        print(action.parse())
    elif self.expr[0] == 'bye':
        import sys  # kept local, matching the import style of the other branches
        sys.exit()
def analyse(path, method):
    print prettyText("[*] Parsing Project at %s ..." % path, 'blue')
    p = parser.PHPProject(path)
    print prettyText("[*] Parsing Completed!", 'blue')
    print prettyText("[*] Searching for calls to %s" % method, 'blue')
    found = search(p, functionMethodFilter, method)
    print parseFound(found)
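# A small CLI sketch tying the analysis helpers above together, assuming they
# live in one module; the script name and usage string are illustrative.
import sys

if __name__ == '__main__':
    if len(sys.argv) == 3:
        searchMethod(sys.argv[1], sys.argv[2])  # look for a single method
    elif len(sys.argv) == 2:
        analyse(sys.argv[1])                    # scan the whole project
    else:
        print 'usage: analyse.py <project-path> [method]'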
async def search_handler(message: types.Message) -> None:
    """Checks whether the sent message is an EN Wiki article URL and replies with the result."""
    try:
        url = urlparse(message.text)
        if URL_REGEX.match(message.text) and url.path != "":
            if url.netloc == "en.wikipedia.org":
                await message.reply(search.search(message.text))
            elif url.netloc == "en.m.wikipedia.org":
                await message.reply(
                    search.search(message.text.replace("en.m.wiki", "en.wiki"))
                )
            else:
                await message.reply(config["WRONG_URL_MESSAGE"])
        else:
            await message.reply(config["WRONG_URL_MESSAGE"])
    except Exception:
        await message.reply(config["ERROR_MESSAGE"])
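# Registration sketch, assuming the handler above is written for aiogram 2.x;
# the token string is a placeholder.
from aiogram import Bot, Dispatcher, executor

bot = Bot(token="YOUR_BOT_TOKEN")  # placeholder token
dp = Dispatcher(bot)
dp.register_message_handler(search_handler)  # handle plain text messages

if __name__ == "__main__":
    executor.start_polling(dp)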
def get_location_guess(request):
    if "query" not in request.GET:  # the original checked request.has_key, which targets the wrong object
        raise Http400("Request GET is missing the query.")
    sqs = SearchQuerySet().models(Location)
    sqs = search(sqs, request.GET.get('query', ""))[:10]
    if not sqs or len(sqs) > 1:
        data = {'single': False, 'query': request.GET['query']}
    else:
        loc = sqs[0].object
        data = {'single': True,
                'latitude': loc.latitude,
                'longitude': loc.longitude}
    return HttpResponse(simplejson.dumps(data), mimetype="application/json")
def analyse(path):
    print prettyText("[*] Parsing Project at %s ..." % path, 'blue')
    p = parser.PHPProject(path)
    print prettyText("[*] Parsing Completed!", 'blue')
    print prettyText("[*] Searching for dangerous methods", 'blue')
    for category in vulndb.A_F_ALL.keys():
        print prettyText("[**] Category: %s" % category, ['yellow', 'bold'])
        for method in vulndb.A_F_ALL[category].keys():
            print prettyText("[***] Method: %s" % str(method), ['yellow'])
            found = search(p, functionClassFilter, method)
            print parseFound(found, vulndb.A_F_ALL[category][method])
def search(cls, expression, page=1, per_page=10):
    hits, total = search.search(
        expression,
        model_types=all_subclasses(cls),
        page=page,
        per_page=per_page
    )
    ids = [h['id'] for h in hits]
    whens = [(id_, i) for i, id_ in enumerate(ids)]
    if hits:
        # order the SQL rows to match the search engine's ranking
        return (cls.query.filter(cls.id.in_(ids))
                .order_by(db.case(whens, value=cls.id)), total)
    else:
        return cls.query.filter_by(id=0), 0
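# Usage sketch, assuming the classmethod above is mixed into a
# Flask-SQLAlchemy model; `Post` and its columns are illustrative.
query, total = Post.search("full text query", page=1, per_page=20)
print("{} hits".format(total))
for post in query:  # rows come back in search-engine ranking order
    print(post.id, post.title)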
def employer_search_helper(request):
    search_results = SearchQuerySet().models(Employer).filter(visible=True)
    if request.GET.get('subscribed', False) == 'true':
        search_results = search_results.filter(subscribers=request.user.id)
    # filter by whether the employer has an upcoming event or not
    if request.GET.get('has_public_events_deadlines', False) == "true":
        search_results = search_results.filter(has_public_events=True)
    # filter by industry
    industry_id = request.GET.get('i', None)
    if industry_id:
        search_results = search_results.filter(industries=industry_id)
    # search
    if request.GET.get('q'):
        search_results = search(search_results, request.GET.get('q'))
    # Extract the objects and sort the employers by name.
    employers = [result.object for result in search_results]
    return sorted(employers, key=lambda n: n.name)
def search_user():
    q = {
        'from': int(request.args.get('offset', 0)),
        'size': int(request.args.get('limit', 10)),
        'query': {
            'match': {
                'name': request.args.get('q', '')
            }
        }
    }
    res = search.search(index=SEARCH_INDEX, doc_type='user', body=q)
    total = res['hits']['total']
    users = []
    for hit in res['hits']['hits']:
        user = hit['_source']
        user['_id'] = user.pop('user_id')
        users.append(user)
    result = {'total': total, 'users': users}
    return bson.json_util.dumps(result)
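# Client-side sketch for the view above, assuming it is routed at
# /users/search; the host and route are illustrative.
import requests

resp = requests.get("http://localhost:5000/users/search",
                    params={"q": "alice", "offset": 0, "limit": 10})
payload = resp.json()
print(payload["total"], "matching users")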
def generateCachedPaths():
    root = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    start_time = time.time()
    n = 0
    total = len(randompath.sources) * len(randompath.destinations)
    for s in randompath.sources:
        for d in randompath.destinations:
            print("Calculation for {0} to {1}".format(s, d))
            r = dict()
            try:
                r = search.search(s, d)
                r['source'] = s
                r['destination'] = d
                print("Execution time: %s" % str(r['execution_time']))
                pickle.dump(r, open("core/cached_paths/%s.dump" % hash('{0}_{1}'.format(s, d)), "wb"))
            except Exception:
                print("Could not find path between {0} and {1}".format(s, d))
                print(sys.exc_info())
            gc.collect()
            elapsed_time = time.time() - start_time
            n += 1
            # linear ETA: expected total time scaled by progress, minus time already spent
            remaining_time = elapsed_time * total / n - elapsed_time
            print("{0} of {3} Elapsed: {1} ETR: {2}".format(n, elapsed_time, remaining_time, total))
import phply
from core.search import search
from core.filters import classFilter, functionFilter
from taint.taint import *  # assumed to also provide `parser`, `prettyText`, and `generateTST`
import sys
import os
import vulndb

# p = "/home/asm/Documents/Aptana Studio 3 Workspace/code_review_framework/samples/webERP/CopyBOM.php"
path = os.path.abspath(sys.argv[1])
# path = os.path.abspath(p)

f = parser.PHPFile(path)
functions = search(f, classFilter, phply.phpast.Function)
for func in functions:
    # every formal parameter starts out fully tainted ("ANY")
    params = search(func, classFilter, phply.phpast.FormalParameter)
    functionInputParams = dict()
    for param in params:
        functionInputParams[param] = "ANY"
    print functionInputParams
    tst = generateTST(func, functionInputParams)
    print tst
    # search a method and propagate taint
    for kcat in vulndb.A_F_ALL.keys():
        cat = vulndb.A_F_ALL[kcat]
        print prettyText('[*] Category: %s' % kcat, 'red')
def employer_students(request, extra_context=None):
    context = {}
    if request.is_ajax():
        student_list = request.GET["student_list"]
        student_list_id = request.GET["student_list_id"]
        recruiter = request.user.recruiter
        if student_list == student_enums.GENERAL_STUDENT_LISTS[0][1]:
            students = SearchQuerySet().models(Student).filter(visible=True)
        else:
            # if student_list == student_enums.GENERAL_STUDENT_LISTS[1][1]:
            #     students = get_unlocked_students(recruiter.employer, request.META['has_at_least_premium'])
            if student_list == student_enums.GENERAL_STUDENT_LISTS[1][1]:
                students = recruiter.employer.starred_students.all()
            elif student_list == student_enums.GENERAL_STUDENT_LISTS[2][1]:
                try:
                    resume_book = ResumeBook.objects.get(recruiter=recruiter, delivered=False)
                except ResumeBook.DoesNotExist:
                    resume_book = ResumeBook.objects.create(recruiter=recruiter)
                students = resume_book.students.visible()
            else:
                parts = student_list.split(" ")
                if parts[-1] in ("RSVPs", "Attendees") or (parts[-1] == "Drop" and parts[-2] == "Resume"):
                    try:
                        e = Event.objects.get(id=student_list_id)
                    except Exception:
                        raise Http404
                    if parts[-1] == "RSVPs":
                        students = Student.objects.visible().filter(
                            rsvp__in=e.rsvp_set.filter(attending=True), profile_created=True)
                    elif parts[-1] == "Attendees":
                        students = Student.objects.visible().filter(
                            attendee__in=e.attendee_set.all(), profile_created=True)
                    elif parts[-1] == "Drop" and parts[-2] == "Resume":
                        students = Student.objects.visible().filter(
                            droppedresume__in=e.droppedresume_set.all(), profile_created=True)
                else:
                    students = ResumeBook.objects.get(id=student_list_id).students.visible()
            students = SearchQuerySet().models(Student).filter(
                obj_id__in=[student.id for student in students])
        am_filtering = False
        if "gpa" in request.GET:
            am_filtering = True
            students = students.filter(gpa__gte=request.GET["gpa"])
        if "act" in request.GET:
            am_filtering = True
            students = students.filter(act__gte=request.GET["act"])
        if "sat_t" in request.GET:
            am_filtering = True
            students = students.filter(sat_t__gte=request.GET["sat_t"])
        if "sat_m" in request.GET:
            am_filtering = True
            students = students.filter(sat_m__gte=request.GET["sat_m"])
        if "sat_v" in request.GET:
            am_filtering = True
            students = students.filter(sat_v__gte=request.GET["sat_v"])
        if "sat_w" in request.GET:
            am_filtering = True
            students = students.filter(sat_w__gte=request.GET["sat_w"])
        if "degree_programs" in request.GET:
            am_filtering = True
            students = students.filter(degree_program__in=request.GET["degree_programs"].split("~"))
        if "graduation_years" in request.GET:
            am_filtering = True
            students = students.filter(graduation_year__in=request.GET["graduation_years"].split("~"))
        if "employment_types" in request.GET:
            am_filtering = True
            students = students.filter(looking_for__in=request.GET["employment_types"].split("~"))
        if "previous_employers" in request.GET:
            am_filtering = True
            students = students.filter(previous_employers__in=request.GET["previous_employers"].split("~"))
        if "industries_of_interest" in request.GET:
            am_filtering = True
            students = students.filter(industries_of_interest__in=request.GET["industries_of_interest"].split("~"))
        if "languages" in request.GET:
            am_filtering = True
            students = students.filter(languages__in=request.GET["languages"].split("~"))
        if "campus_orgs" in request.GET:
            am_filtering = True
            students = students.filter(campus_involvement__in=request.GET["campus_orgs"].split("~"))
        if "countries_of_citizenship" in request.GET:
            am_filtering = True
            students = students.filter(countries_of_citizenship__in=request.GET["countries_of_citizenship"].split("~"))
        if request.GET["older_than_21"] != "N":
            am_filtering = True
            students = students.filter(older_than_21=True)
        if "schools" in request.GET:
            am_filtering = True
            school_ids = request.GET["schools"].split("~")
            schools = School.objects.filter(id__in=school_ids)
            domain_names = DomainName.objects.filter(school__in=schools)
            domains = map(lambda x: x.domain, domain_names)
            if domains and students:
                students = students.filter(reduce(operator.or_, (SQ(email__contains=x) for x in domains)))
        if "courses" in request.GET:
            am_filtering = True
            courses = request.GET["courses"].split("~")
            students = students.filter(SQ(first_major__in=courses) | SQ(second_major__in=courses))
        if "query" in request.GET:
            students = search(students, request.GET["query"])
        # Highlight the results
        students = students.highlight()
        results_per_page = int(request.GET["results_per_page"])
        start_index = results_per_page * (int(request.GET["page"]) - 1)
        count = students.count()
        if start_index >= count:
            start_index = 0
        ordered_results = order_results(students, request)[start_index:start_index + results_per_page]
        ordered_result_objects = []
        for search_result in ordered_results:
            if search_result.highlighted:
                ordered_result_objects.append((search_result.object, search_result.highlighted["text"][0]))
            else:
                ordered_result_objects.append((search_result.object, ""))
        # pad the single materialized page out to the full count so the
        # paginator reports the right total and page numbers
        padded_ordered_results = [""] * count
        for i in range(len(ordered_result_objects)):
            padded_ordered_results[i + start_index] = ordered_result_objects[i]
        paginator = DiggPaginator(padded_ordered_results, results_per_page, body=3, padding=1, margin=2)
        context["filtering"] = am_filtering
        try:
            page = paginator.page(request.GET["page"])
        except EmptyPage:
            page = paginator.page(1)
        context["page"] = page
        context["results"] = process_results(request.user.recruiter, request.META["has_at_least_premium"], page)
        context["current_student_list"] = request.GET["student_list"]
        context["total_results_num"] = count
        # I don't like this method of statistics
        if request.user.recruiter.employer.name != "Umeqo":
            for (student, highlighted_text, is_in_resume_book, is_starred,
                 comment, num_of_events_attended, visible, school) in context["results"]:
                if student:
                    student.studentstatistics.shown_in_results_count += 1
                    student.studentstatistics.save()
        resume_book = ResumeBook.objects.get(recruiter=request.user.recruiter, delivered=False)
        if len(resume_book.students.visible()) >= s.RESUME_BOOK_CAPACITY:
            context["resume_book_capacity_reached"] = True
        context["TEMPLATE"] = "employer_students_results.html"
        context.update(extra_context or {})
        return context
    else:
        page_messages = {
            "NO_STUDENTS_SELECTED_MESSAGE": messages.no_students_selected,
            "WAIT_UNTIL_RESUME_BOOK_IS_READY_MESSAGE": messages.wait_until_resume_book_is_ready,
        }
        context["page_messages"] = page_messages
        context["query"] = request.GET.get("query", "")
        # Passing the employer id to generate the appropriate student list choices
        context["student_filtering_form"] = StudentFilteringForm(initial={
            "has_at_least_premium": request.META["has_at_least_premium"],
            "recruiter_id": request.user.recruiter.id,
            "ordering": request.user.recruiter.recruiterpreferences.default_student_result_ordering,
            "results_per_page": request.user.recruiter.recruiterpreferences.default_student_results_per_page,
        })
        context["student_search_form"] = StudentSearchForm()
        context["added"] = employer_enums.ADDED
        context["starred"] = employer_enums.STARRED
        context["email_delivery_type"] = core_enums.EMAIL
        context["in_resume_book_student_list"] = student_enums.GENERAL_STUDENT_LISTS[2][1]
        context["resume_book_capacity"] = s.RESUME_BOOK_CAPACITY
        context["TEMPLATE"] = "employer_students.html"
        context.update(extra_context or {})
        return context
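# A standalone sketch of the padding trick used above: only one page of hits
# is materialized, but the paginator is handed a list of full length so the
# page count and page numbers stay correct. Names are illustrative;
# DiggPaginator subclasses Django's Paginator.
from django.core.paginator import Paginator

def padded_page(results, count, page_num, per_page):
    start = per_page * (page_num - 1)
    if start >= count:
        start = 0
    window = list(results[start:start + per_page])  # one page of real objects
    padded = [""] * count                           # placeholders everywhere else
    padded[start:start + len(window)] = window
    return Paginator(padded, per_page).page(page_num)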
def respond(result):
    answer = []
    # If there are no definitions defined, find new ones
    if "keywords" in result:
        answer = search.search(result["keywords"])
    else:
        definitions = result["definitions"]
        used_stems = []
        # Only use standalone definitions for now
        without_aggregates = [definition for definition in definitions if "using" not in definition]
        # Check if two or more definitions are the same, if so use them for answer
        for definition in without_aggregates:
            matches = 0
            other_definitions = list(without_aggregates)
            other_definitions.remove(definition)
            for other in other_definitions:
                if definition["text"] == other["text"]:
                    matches += 1
            if matches > 0:
                used_stems.append(definition["stem"])
                # TODO: save with id_str from returned Twitter status update object
                # database.answers.save({"text": definitionA["text"], "definitions": [definitionA["_id"]]})
        # Check if matches were found
        # The idea here is to create new aggregate definitions from matching standalone definitions
        # Example: apple has "fruit" and "from tree", pear only has "fruit"
        # In this case a new definition "fruit; from tree" will be created and saved for both stems
        if len(used_stems) > 0:
            text = ""
            ids = []
            # Create list of standalone definitions based on usable stems found in previous loop
            usable_definitions = [definition for definition in definitions
                                  if definition["stem"] in used_stems and "using" not in definition]
            # Append all text of unique definitions together, create list of all ids
            for i, definition in enumerate(usable_definitions):
                if definition["text"] not in text:
                    if i > 0:
                        text += "; " + definition["text"]
                    else:
                        text += definition["text"]
                ids.append(definition["_id"])
            # Create new definition for stem if it doesn't exist yet
            for stem in used_stems:
                if (not definition_model.text_exists_for_stem(stem, text)
                        and not any(definition["text"] == text for definition in answer)):
                    new_definition = {"stem": stem, "text": text, "score": 0.5, "using": ids}
                    answer.append(new_definition)
                    print("Saving new definition: " + str(new_definition))
                    database.definitions.save(new_definition)
        # If no matches were found, try finding an existing aggregate or create a new one
        # The idea here is to use or create aggregate definitions no matter how related they are
        # Example: apple has "fruit", pear has "green", lemon has "from tree"
        # In this case, it'll check if there's already a "fruit; green; from tree" definition and if there is use that
        # If there's not, it'll create that and save it as a definition for all three used stems
        else:
            text = ""
            ids = []
            new_definitions = []
            used_stems = []
            # Append all text of definitions together, create list of all ids
            for i, definition in enumerate(definitions):
                if i > 0:
                    text += "; " + definition["text"]
                else:
                    text += definition["text"]
                ids.append(definition["_id"])
            for definition in definitions:
                # Only process every stem once
                if definition["stem"] not in used_stems:
                    used_stems.append(definition["stem"])
                    # Check if there already is an aggregate definition, if so take highest scoring one
                    highest_scoring_aggregate = definition_model.get_highest_scoring_aggregate(definition["stem"])
                    if highest_scoring_aggregate is not None:
                        other_ids = list(ids)
                        other_ids.remove(highest_scoring_aggregate["_id"])
                        # Check if the aggregate definition is made up of all the other given definitions
                        if all(definition_id in highest_scoring_aggregate["using"] for definition_id in other_ids):
                            print("Using existing aggregate definition")
                            # If it is, add it as part of the answer
                            answer.append(highest_scoring_aggregate)
                    # If there is no aggregate, make a new one using the text/ids of all definitions combined
                    else:
                        new_definition = {"stem": definition["stem"], "text": text, "score": 0.5, "using": ids}
                        # Keep track of the new definitions
                        new_definitions.append(new_definition)
                        print("Saving new definition: " + str(new_definition))
                        database.definitions.save(new_definition)
            # If answer is empty that means no existing aggregate definitions were found, so use the new ones
            if len(answer) == 0:
                answer = new_definitions
    print("Produced answer: " + str(answer))
    return answer
def select(sql):
    search.search(sql)