def get(self, request):
    """Rebuild the scraping queue: purge it, then re-publish every running ScraperJob."""
    # Drop any stale messages so jobs are not queued twice.
    QueueConnection().purge(QueueNames.scraping_service)

    running_ids = ScraperJob.objects.filter(running=True).values_list("id", flat=True)
    for job_id in running_ids:
        payload = json.dumps({"scraper_job_id": job_id})
        QueueConnection.quick_publish(QueueNames.scraping_service, payload)

    messages.success(request, "ScraperJob queue successfully re-built.")
    return redirect("scraping_manager:index")
def handle_candidate_unsubscribe(candidate):
    """Record that *candidate* responded (by unsubscribing) and hide their profile.

    Marks the candidate as responded, hides the linked profile from search,
    and pings the on-demand view refresher queue so derived views update.
    """
    candidate.responded = True
    candidate.save(update_fields=['responded'])

    # BUG FIX: the original read `employer_candidate.profile`, but no such
    # name exists in this scope -- the profile belongs to `candidate`.
    profile = candidate.profile
    profile.hide_from_search = True
    profile.save(update_fields=['hide_from_search'])

    QueueConnection.quick_publish(queue_name=QueueNames.on_demand_view_refresher)
def queue_job_rescore(job_id, skip_delta_days=None):
    """Queue a system-mode rescore of all candidates for a job.

    Args:
        job_id: primary key of the job to rescore.
        skip_delta_days: optional window forwarded to the scoring service
            (semantics live in the worker; passed through unchanged).
    """
    # Removed leftover debug print from the original.
    QueueConnection.quick_publish(queue_name=QueueNames.scoring_service,
                                  body=json.dumps({
                                      'mode': 'system',
                                      'job_id': job_id,
                                      'skip_delta_days': skip_delta_days
                                  }))
def on_resume(data, url):
    """Persist one scraped resume and queue the profile-build step.

    Closure: uses ``scraper_job`` and ``self`` from the enclosing scope.
    Always creates a fresh ProfileResume for *url* (old data is preserved),
    reuses the prior Profile when the URL was scraped before, guarantees a
    ProfileJobMapping exists, bumps the ScraperJob counters, and publishes
    a build_profile message.

    Returns:
        True, signalling the resume was handled.
    """
    previously_scraped = ProfileResume.objects.filter(
        url=url).order_by('-date_created').first()

    # Always create a new ProfileResume to preserve precious data
    profile_resume = ProfileResume(url=url,
                                   source=scraper_job.source,
                                   parser_output=data)

    # Copy over the profile from the old resume, if it exists
    if previously_scraped and previously_scraped.profile:
        profile_resume.profile = previously_scraped.profile

    # Ensure the ProfileResume has an attached profile.
    if not profile_resume.profile:
        p = Profile()
        p.save()
        profile_resume.profile = p

    profile_resume.save()

    # Ensure that the attached profile is mapped to at least one job.
    if not ProfileJobMapping.objects.filter(
            profile_id=profile_resume.profile_id,
            job_id=scraper_job.job_id).exists():
        ProfileJobMapping(profile_id=profile_resume.profile_id,
                          job_id=scraper_job.job_id).save()
        self.logger.info("Mapped Profile id {} to Job id {}.".format(
            profile_resume.profile_id, scraper_job.job_id))

    self.logger.info("Created ProfileResume id {}{}.".format(
        profile_resume.id,
        " (previously scraped)" if previously_scraped else ""))

    # Re-read before touching counters to avoid clobbering concurrent
    # updates made elsewhere.
    scraper_job.refresh_from_db()
    if previously_scraped:
        scraper_job.resumes_rescraped += 1
    else:
        scraper_job.new_resumes_scraped += 1
    scraper_job.save()

    # Removed four leftover debug print() calls from the original.
    QueueConnection.quick_publish(queue_name=QueueNames.build_profile,
                                  body=json.dumps({
                                      'profile_id': profile_resume.profile.id,
                                      'profile_resume_id': profile_resume.id
                                  }))
    return True
def queue_request(score_request, priority=None, skip_delta_days=None):
    """Publish a single-mode scoring message for a saved ScoreRequest.

    Does nothing when the request has not been saved yet (no primary key),
    since the worker could not look it up.
    """
    if score_request.id is None:
        return

    message = json.dumps({
        'mode': 'single',
        'skip_delta_days': skip_delta_days,
        'score_request_id': score_request.id
    })
    QueueConnection.quick_publish(queue_name=QueueNames.scoring_service,
                                  body=message,
                                  priority=priority)
def get(self, request, ranking_job_id):
    """Toggle a RankingJob: start it (and queue a message) or stop it."""
    ranking_job = RankingJob.objects.get(id=int(ranking_job_id))

    if ranking_job.running:
        # Already running -> flip the flag off; the worker will halt.
        ranking_job.running = False
        ranking_job.save()
        messages.success(
            request,
            'Stopped scraping RankingJob id {}. Will pick up where left off if restarted.'
            .format(ranking_job.id))
    else:
        ranking_job.running = True
        ranking_job.save()
        QueueConnection.quick_publish(
            QueueNames.icims_service,
            json.dumps({"ranking_job_id": ranking_job_id}))
        messages.success(request, 'Started scraping RankingJob')

    return redirect('icims_manager:index')
def post(self, request):
    """Create a RankingJob dated today and queue it for the iCIMS service."""
    form = RankingJobForm(request.POST)

    if not form.is_valid():
        # Guard clause: bounce invalid submissions straight back.
        messages.error(request,
                       'Please fix the validation errors and try again.')
        return redirect('icims_manager:index')

    ranking_job = RankingJob()
    ranking_job.start_date = datetime.datetime.today().strftime('%Y-%m-%d')
    ranking_job.save()

    QueueConnection.quick_publish(
        QueueNames.icims_service,
        json.dumps({"ranking_job_id": ranking_job.id}))
    messages.success(request, 'Queued RankingJob {}.'.format(ranking_job))
    return redirect('icims_manager:index')
def wait_rabbitmq():
    """Block until a RabbitMQ connection can be opened.

    Only polls in local environments; in production RabbitMQ is assumed
    to already be running, so this returns immediately.
    """
    if not is_local_env():
        return
    while True:
        try:
            QueueConnection()
        except Exception:
            # Broker not accepting connections yet -- deliberate
            # best-effort retry loop; pause briefly and try again.
            # (Fixed: dropped the unused `as e` binding and the
            # redundant trailing `continue`.)
            time.sleep(1)
        else:
            return
def post(self, request):
    """Create a ScraperJob from the submitted form and queue it for scraping."""
    form = ScraperJobForm(request.POST)

    if form.is_valid():
        scraper_job = ScraperJob()
        scraper_job.start_url = form.cleaned_data['start_url']
        scraper_job.job_id = form.cleaned_data['job']
        scraper_job.save()

        payload = json.dumps({"scraper_job_id": scraper_job.id})
        QueueConnection.quick_publish(QueueNames.scraping_service, payload)
        messages.success(request,
                         'Queued ScraperJob {}.'.format(scraper_job))
    else:
        messages.error(request,
                       'Please fix the validation errors and try again.')

    return redirect('scraping_manager:index')
def get(self, request, scraper_job_id):
    """Toggle a ScraperJob: start it (and queue a message) or stop it."""
    scraper_job = ScraperJob.objects.get(id=int(scraper_job_id))

    if scraper_job.running:
        # Already running -> flip the flag off; the worker will halt.
        scraper_job.running = False
        scraper_job.save()
        messages.success(
            request,
            'Stopped scraping ScraperJob id {}. Will pick up where left off if restarted.'
            .format(scraper_job.id))
    else:
        scraper_job.running = True
        scraper_job.save()
        QueueConnection.quick_publish(
            QueueNames.scraping_service,
            json.dumps({"scraper_job_id": scraper_job_id}))
        messages.success(
            request,
            'Started scraping ScraperJob id {} starting at resume #{}.'.
            format(scraper_job.id, scraper_job.start_offset))

    return redirect('scraping_manager:index')
def go(self, request, params):
    """Mark a ProfileVerificationRequest verified and fire its callback.

    Args:
        request: the incoming request object (kept for interface
            compatibility; not otherwise used by this handler).
        params: mapping containing 'request_id'.

    Returns:
        dict with "queued" (bool) and, when the request is missing, "error".
    """
    # BUG FIX: the original rebound the `request` parameter to the
    # verification request, shadowing the caller's request object. Use a
    # distinct local name instead.
    verification_request = ProfileVerificationRequest.objects.filter(
        id=int(params.get('request_id'))).first()
    if not verification_request:
        return {
            "error": "Verification request does not exist.",
            "queued": False
        }

    verification_request.profile.name_verification_completed = True
    verification_request.profile.save()
    verification_request.verified = True
    verification_request.save()

    # Notify the original requester, if a callback was registered.
    if verification_request.callback_queue and verification_request.callback_message:
        QueueConnection.quick_publish(
            queue_name=verification_request.callback_queue,
            body=verification_request.callback_message)
        return {"queued": True}
    return {"queued": False}
def on_resume(data, url):
    """Persist one scraped iCIMS resume and queue the profile-build step.

    Closure: uses ``ranking_job`` and ``self`` from the enclosing scope.
    Resolves the internal Job for the iCIMS posting, always creates a new
    ProfileResume (reusing a prior Profile for the same URL when one
    exists), guarantees both an IcimsProfileJobMapping and a
    ProfileJobMapping, bumps the RankingJob counters, and publishes a
    build_profile message.

    Returns:
        True, signalling the resume was handled.
    """
    # Resolve the Job: exact name match first, then a case-insensitive
    # substring match (alphabetical first hit), else fall back to id 1.
    job_name = data["icims_job"][0]["value"]
    job_data = list(Job.objects.filter(job_name=job_name))
    if len(job_data) > 0:
        job_id = job_data[0].id
    else:
        job_data = list(
            Job.objects.filter(
                job_name__icontains=job_name).order_by('job_name'))
        if len(job_data) > 0:
            job_id = job_data[0].id
        else:
            job_id = 1  # hard-coded fallback Job id

    # NOTE(review): despite its name this is an IcimsJobData *instance*,
    # not an id; also .get() raises DoesNotExist / MultipleObjectsReturned
    # when the title is missing or duplicated -- confirm this is intended.
    ranking_job_id = IcimsJobData.objects.get(job_title=job_name)

    previously_scraped = ProfileResume.objects.filter(
        url=url).order_by('-date_created').first()

    # Always create a new ProfileResume to preserve precious data
    profile_resume = ProfileResume(url=url,
                                   source=ranking_job.source,
                                   parser_output=data)

    # Copy over the profile from the old resume, if it exists
    if previously_scraped and previously_scraped.profile:
        profile_resume.profile = previously_scraped.profile

    # Ensure the ProfileResume has an attached profile.
    if not profile_resume.profile:
        p = Profile()
        p.save()
        profile_resume.profile = p

    profile_resume.save()
    ranking_job.job_id = job_id

    # Ensure that the attached profile is mapped to at least one icims job.
    if not IcimsProfileJobMapping.objects.filter(
            profile_id=profile_resume.profile_id,
            job_id=ranking_job.job_id,
            icims_job_id=ranking_job_id).exists():
        IcimsProfileJobMapping(profile_id=profile_resume.profile_id,
                               job_id=ranking_job.job_id,
                               icims_job_id=ranking_job_id).save()
        self.logger.info(
            "Mapped Icims Profile id {} to icims Job id {}.".format(
                profile_resume.profile_id, ranking_job_id))

    self.logger.info("Created ProfileResume id {}{}.".format(
        profile_resume.id,
        " (previously scraped)" if previously_scraped else ""))
    ranking_job.refresh_from_db()

    # Ensure that the attached profile is mapped to at least one job.
    if not ProfileJobMapping.objects.filter(
            profile_id=profile_resume.profile_id,
            job_id=ranking_job.job_id).exists():
        ProfileJobMapping(profile_id=profile_resume.profile_id,
                          job_id=ranking_job.job_id).save()
        self.logger.info("Mapped Profile id {} to Job id {}.".format(
            profile_resume.profile_id, ranking_job.job_id))

    # NOTE(review): this "Created ProfileResume" log line (and the
    # refresh_from_db below) duplicates the one above -- looks like a
    # copy-paste remnant; left untouched here.
    self.logger.info("Created ProfileResume id {}{}.".format(
        profile_resume.id,
        " (previously scraped)" if previously_scraped else ""))

    # Re-read before touching counters to avoid clobbering concurrent
    # updates made elsewhere.
    ranking_job.refresh_from_db()
    if previously_scraped:
        ranking_job.resumes_rescraped += 1
    else:
        ranking_job.new_resumes_scraped += 1
    ranking_job.save()

    # Queue the profile-build step for this resume.
    QueueConnection.quick_publish(queue_name=QueueNames.build_profile,
                                  body=json.dumps({
                                      'profile_id': profile_resume.profile.id,
                                      'profile_resume_id': profile_resume.id,
                                      'source': ranking_job.source
                                  }))
    return True
def queue_notification(self, _id):
    """Publish a message asking the notification service to send notification *_id*."""
    payload = json.dumps({"notification_id": _id})
    QueueConnection.quick_publish(QueueNames.notification_service, payload)
def send(self, *args, **kwargs):
    """Build and persist a notification, queue it for delivery, then run the after-send hook."""
    notification = self.build(*args, **kwargs)
    notification.save()
    QueueConnection.quick_publish(
        QueueNames.notification_service,
        json.dumps({'notification_id': notification.id}))
    self.after_send(notification, *args, **kwargs)
def get(self, request):
    """Stop every ScraperJob and drain the scraping queue."""
    # Purge first so no queued messages restart work after the flags flip.
    connection = QueueConnection()
    connection.purge(QueueNames.scraping_service)

    ScraperJob.objects.all().update(running=False)

    messages.success(request, "Stopped all ScraperJobs.")
    return redirect('scraping_manager:index')
"""CLI script: queue an automatic name-verification request for a profile.

Usage: python <script> PROFILE_ID
"""
import sys
import os
import importlib
import time
import json

from bakround_applicant.utilities.deployment import configure_django
from bakround_applicant.services.queue import QueueConnection, QueueNames
from bakround_applicant.all_models.db import Profile, ProfileVerificationRequest

if __name__ == '__main__':
    configure_django(rabbitmq=True, postgres=True, default_local=True)

    profile_id = int(sys.argv[1])

    # Idiom fix: exists() instead of fetching a row we never use.
    if not Profile.objects.filter(id=profile_id).exists():
        print("Profile id {} does not exist.".format(profile_id))
        sys.exit(1)

    # Reuse an existing verification request for this profile, or make one.
    rq = ProfileVerificationRequest.objects.filter(profile_id=profile_id).first()
    if not rq:
        rq = ProfileVerificationRequest(profile_id=profile_id)
    # Force automatic (non-manual) verification before queueing.
    rq.use_manual = False
    rq.save()

    QueueConnection().publish(queue_name=QueueNames.verifying_service,
                              body=json.dumps({'request_id': rq.id}))

    print("Queued Profile id {} (ProfileVerificationRequest id {}).".format(profile_id, rq.id))
def queue_job_remap(job_id):
    """Ask the mapping service to re-map candidates for *job_id*."""
    body = json.dumps({'job_id': job_id})
    QueueConnection.quick_publish(queue_name=QueueNames.mapping_service,
                                  body=body)