def reschedule_all_failed(request):
    queue = get_failed_queue(django_rq.get_connection())
    for job in queue.jobs:
        requeue_job(job.id, connection=queue.connection)
    return HttpResponse('Success')
def reports_file(request):
    job_id = request.GET.get('job_id')
    redis_conn = django_rq.get_connection()
    job = Job.fetch(job_id, redis_conn)
    response = HttpResponse(job.return_value, content_type="application/vnd.ms-excel")
    response['Content-Disposition'] = 'attachment; filename=report.xls'
    return response
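For context, the enqueue side that such a download view implies might look like the sketch below; `build_report` and the queue name are assumptions, not from the source.

import django_rq

def start_report(request):
    # build_report is a hypothetical task returning the spreadsheet bytes;
    # result_ttl keeps the return value around long enough to download it.
    queue = django_rq.get_queue('default')
    job = queue.enqueue(build_report, request.user.id, result_ttl=600)
    return HttpResponse(job.id)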
def get_data(user, date=None):
    """Return a dict of data (redis keys/values) for the UserQueue for the given date."""
    if date is None:
        date = timezone.now()
    date_string = date.strftime("%Y-%m-%d")
    conn = django_rq.get_connection('default')

    # Redis keys for the count, and all queues.
    keys = [
        'uq:{user_id}:{date_string}:count',
        'uq:{user_id}:{date_string}:low',
        'uq:{user_id}:{date_string}:medium',
        'uq:{user_id}:{date_string}:high',
    ]
    keys = [k.format(user_id=user.id, date_string=date_string) for k in keys]

    # Get the list values, and convert them from bytes to utf-8 strings.
    data = {k: conn.lrange(k, 0, 100) for i, k in enumerate(keys) if i > 0}
    for key, values in data.items():
        data[key] = [v.decode('utf8') for v in values]

    try:
        data[keys[0]] = int(conn.get(keys[0]))  # then include the count
    except TypeError:  # If `count` is None
        data[keys[0]] = 0
    return data
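A plausible writer-side counterpart, mirroring the key scheme `get_data()` reads; the function name and the two-day TTL are assumptions:

def add_to_queue(user, message_id, priority='low', date_string=None):
    # Hypothetical sketch: push onto one of the priority lists and bump
    # the per-day counter, expiring both keys after two days.
    conn = django_rq.get_connection('default')
    if date_string is None:
        date_string = timezone.now().strftime("%Y-%m-%d")
    list_key = 'uq:{}:{}:{}'.format(user.id, date_string, priority)
    count_key = 'uq:{}:{}:count'.format(user.id, date_string)
    conn.rpush(list_key, message_id)
    conn.incr(count_key)
    conn.expire(list_key, 60 * 60 * 48)
    conn.expire(count_key, 60 * 60 * 48)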
def get_context_data(self, **kwargs):
    ctx = super(LongTaskCreateView, self).get_context_data(**kwargs)
    ctx['tasks'] = LongTask.objects.all().order_by('-created_on')
    redis_conn = django_rq.get_connection('default')
    ctx['queue'] = Queue(settings.DJANGO_TEST_RQ_LOW_QUEUE, connection=redis_conn)
    return ctx
def requeue_failed():
    """Requeue jobs in the failed queue."""
    connection = django_rq.get_connection()
    failed_queue = rq.queue.get_failed_queue(connection)
    job_ids = failed_queue.job_ids
    requeued = 0
    for job_id in job_ids:
        try:
            job = rq.job.Job.fetch(job_id, connection=connection)
        except rq.job.NoSuchJobError:
            # Silently remove this vanished job and continue.
            failed_queue.remove(job_id)
            continue
        if job.status == rq.job.Status.FAILED:
            failed_queue.requeue(job_id)
            requeued += 1
        else:
            failed_queue.remove(job_id)
    logger.info("Requeued %d failed jobs", requeued)
    return requeued
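Note that `get_failed_queue` was removed in rq 1.0 in favour of `FailedJobRegistry`; a rough modern equivalent, offered only as a sketch:

import django_rq
from rq.registry import FailedJobRegistry

def requeue_failed_modern():
    # Requeue everything in the failed registry of the default queue.
    queue = django_rq.get_queue('default')
    registry = FailedJobRegistry(queue=queue)
    requeued = 0
    for job_id in registry.get_job_ids():
        registry.requeue(job_id)  # moves the job back onto its queue
        requeued += 1
    return requeued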
class BaseSpider(object):
    NAME = "base"
    task_name = ""
    redis_conn = django_rq.get_connection()
    task_model = BaseSpiderTask
    _current_job = None
    _current_task = None

    def _get_job(self):
        queue = Queue(self.task_name, connection=self.redis_conn)
        return queue.dequeue()

    def get_current_job(self):
        self._current_job = self._get_job()
        return self._current_job

    def get_current_task(self):
        self.get_current_job()
        if not self._current_job:
            return None
        self._current_task = self.task_model.objects.filter(
            task_name=self.task_name,
            id=self._current_job.id).first()
        if self._current_task:  # filter().first() may return None
            self._current_task.lock()
            self._current_task.save()
        return self._current_task

    def run(self):
        pass
def handle(self, *args, **kwargs):
    if sum([kwargs['limit'], kwargs['job_id']]) > 1:
        raise CommandError("You can't mix limit and job_id options.")
    elif sum([kwargs['limit'], kwargs['job_id']]) == 0:
        raise CommandError("Choose something...")
    if not args:
        raise CommandError("Specify limit or job_id.")
    queue = django_rq.get_queue('default')
    if kwargs.get('limit', False):
        try:
            limit = int(args[0])
        except (IndexError, ValueError) as e:
            raise CommandError(e)
        job = queue.enqueue_call(
            func=prime_numbers,
            args=(limit,),
            timeout=3600,
            result_ttl=60,
        )
        self.stdout.write("JOB ID = %s" % job.id)
    if kwargs.get('job_id', False):
        job = rq.job.Job.fetch(args[0], django_rq.get_connection())
        while job.result is None and not job.is_failed:
            time.sleep(0.5)
            sys.stdout.write('.')
            sys.stdout.flush()
        if job.is_failed:
            self.stdout.write('\nJOB FAILED\n')
        else:
            self.stdout.write('\n%s\n' % job.result)
def job_message(message, status="success"):
    job = get_current_job(connection=django_rq.get_connection())
    if not job.meta.get('messages'):
        job.meta['messages'] = deque()
    job.meta['messages'].append({"message": message, "status": status})
    job.save_meta()
    job.save()
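Usage would be from inside a running task, since `get_current_job` only resolves there; a minimal sketch with a hypothetical task:

from django_rq import job

@job('default')
def import_rows(rows):
    # Hypothetical task that reports progress through job_message().
    for n, row in enumerate(rows):
        process_row(row)  # hypothetical per-row work
        job_message("processed row %d" % n)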
def run(*args):
    print("Empty queues")
    django_rq.get_queue('bot').empty()
    django_rq.get_queue('listen').empty()

    # Stop existing jobs
    registry = StartedJobRegistry('bot', connection=django_rq.get_connection('bot'))
    running_ids = registry.get_job_ids()
    if len(running_ids) > 1:
        for i in running_ids:
            current_job = django_rq.get_queue('bot').fetch_job(i)
            print("Delete : ", current_job)
            current_job.delete()
    else:
        for i in running_ids:
            current_job = django_rq.get_queue('bot').fetch_job(i)
            print("Send kill : ", current_job)
            current_job.meta['kill'] = "true"
            current_job.save_meta()

    if args and args[0] == "stop":
        return

    print("Launch bot job")
    print(launch_bot.delay())
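The `kill` flag only works if the bot job cooperates by re-reading its own meta; a sketch of what the loop inside `launch_bot` might look like (the body is hypothetical):

from django_rq import job
from rq import get_current_job

@job('bot')
def launch_bot():
    current = get_current_job()
    while True:
        current.refresh()  # re-read meta written by the script above
        if current.meta.get('kill') == "true":
            break
        run_one_iteration()  # hypothetical unit of bot work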
def render_filename(self, value, record):
    url = static('cloud-download.png')
    if platform != "win32":
        try:
            redis_conn = get_connection()
            q = Queue(connection=redis_conn)
            job = q.fetch_job(record.job_id)
            if job.is_finished:
                ret = {'status': 'ready'}
            elif job.is_queued:
                ret = {'status': 'in-queue'}
            elif job.is_started:
                ret = {'status': 'running...'}
            elif job.is_failed:
                ret = {'status': 'failed'}
        except Exception:
            ret = {'status': 'starting...'}
    else:
        ret = {'status': value}

    if value == 'none':
        return mark_safe(ret['status'])
    else:
        conf = record.get_config()
        tmp_path = conf['tmp_path']
        href = os.path.join(
            settings.MEDIA_URL,
            os.path.relpath(os.path.join(tmp_path, value), settings.MEDIA_ROOT))
        return mark_safe('<a href="' + href + '">' + ret['status'] + '</a>')
def get_running_job():
    registry = StartedJobRegistry('bot', connection=django_rq.get_connection('bot'))
    running_ids = registry.get_job_ids()
    if len(running_ids) == 0:
        return None
    return django_rq.get_queue('bot').fetch_job(running_ids[0])
def clear_failed():
    """Clear jobs in the failed queue."""
    connection = django_rq.get_connection()
    failed_queue = rq.queue.get_failed_queue(connection)
    job_ids = failed_queue.job_ids
    cleared = 0
    for job_id in job_ids:
        try:
            job = rq.job.Job.fetch(job_id, connection=connection)
        except rq.job.NoSuchJobError:
            # Silently remove this vanished job and continue.
            failed_queue.remove(job_id)
            continue

        # Delete jobs for this task
        task_key = job.meta.get('analysis.task.key')
        if task_key:
            task = AnalysisTask.get(task_key)
            frame_id = job.meta.get('analysis.frame.id')
            if task and frame_id:
                # Delete the corresponding frame
                frame_class = task.get_frame_class()
                try:
                    frame_class.objects.filter(pk=frame_id, calculated=False).delete()
                except Exception as e:
                    logger.warn(e, exc_info=True)
        job.cancel()
        cleared += 1
    logger.info("Cleared %d failed jobs", cleared)
    return cleared
class AddressCreateView(View):
    model = Address
    form_class = AddressForm
    template_name = _address_template_path + "create.html"
    # Jobs are enqueued on a Queue, not on the raw Redis connection.
    queue = django_rq.get_queue('default')

    def get(self, request, *args, **kwargs):
        context = {"form": self.form_class}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            address_object = form.save(commit=False)
            address_object.created_by = request.user
            address_object.save()
            try:
                # Enqueue a job to fetch the latitude and longitude of the address.
                self.queue.enqueue(forward_geocode_call, address_object)
            except redis_exceptions.ConnectionError as e:
                # TODO log the failure
                pass
            return HttpResponseRedirect(
                reverse_lazy("address-detail", args=[address_object.pk]))
        return render(request, self.template_name, {"form": form})
class AddressDetailView(DetailView):
    model = Address
    template_name = _address_template_path + 'detail.html'
    context_object_name = 'address'
    # Jobs are enqueued on a Queue, not on the raw Redis connection.
    queue = django_rq.get_queue('default')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_object(self, queryset=None):
        return get_object_or_404(self.model, pk=self.kwargs["pk"])

    def get_context_data(self, **kwargs):
        self.object = self.get_object()
        context = super(AddressDetailView, self).get_context_data(**kwargs)
        try:
            if not self.object.static_map:
                self.queue.enqueue(get_static_map_image, self.object)
        except redis_exceptions.ConnectionError as e:
            # TODO log the failure
            pass
        _address_str = str(self.object.street + ",+" + self.object.city + "+" +
                           self.object.state).replace(' ', '+')
        context['address_map_url'] = _address_str
        context['street_label'] = str(self.object.street).replace(" ", "+")
        return context
def plot(request):
    fig_id = request.session.get('figura')
    redis_conn = django_rq.get_connection('high')
    job = Job.fetch(fig_id, connection=redis_conn)
    if job.get_status() == 'finished':
        # The image is sent as bytes, so render it into a buffer.
        buf = io.BytesIO()
        canvas = FigureCanvasAgg(job.result[0])
        canvas.print_png(buf)
        # Build the response, sending the bytes typed as a PNG image.
        response = HttpResponse(buf.getvalue(), content_type='image/png')
        # Close the figure to free memory.
        plt.close(job.result[0])
        buf.close()
        # Add the Content-Length header for more stability.
        response['Content-Length'] = str(len(response.content))
        return response
    else:
        return HttpResponse('')
def processing(request):
    status_token = request.query_params.get('status_token', None)
    if not status_token:
        return Response({'message': 'Token not defined'}, status=status.HTTP_400_BAD_REQUEST)

    ids = django_rq.get_queue('default').get_job_ids()
    for i, id in enumerate(ids):
        if id == status_token:
            registry = StartedJobRegistry('default', connection=django_rq.get_connection('default'))
            current = registry.get_job_ids()[0]
            return Response(
                {
                    'message': f'This repository is in the queue to be processed. '
                               f'Its processing queue number is {i+1}. '
                               f'In the meantime, keep up with the current processing.',
                    'link': f'https://{get_current_site(request)}/processing?status_token={current}'
                },
                status=status.HTTP_200_OK
            )

    tasks = AsyncTask.objects.filter(id=status_token)
    if not tasks.exists():
        return Response({'message': 'This token is not valid'}, status=status.HTTP_404_NOT_FOUND)
    task = tasks.first()

    # Processing is completed.
    if task.finished and not task.failed:
        return Response({
            'message': ('The issues in this repository have been '
                        'completely processed. Access the '
                        'following link to see the '
                        'resulting image.'),
            'link': task.image
        }, status=status.HTTP_200_OK)

    job = django_rq.get_queue('default').fetch_job(status_token)
    if task.finished and task.failed:
        job.meta.pop('ERRORS')
        return Response({
            'message': ('Something went wrong while processing this repository. '
                        'Check the errors below.'),
            'errors': job.meta
        }, status=status.HTTP_409_CONFLICT)

    if not task.finished:
        return Response({
            'message': 'This repository is still being processed.',
            'status': job.meta
        }, status=status.HTTP_200_OK)
def get_redis_connection():
    if settings.TEST:
        import mockredis
        if not getattr(get_redis_connection, 'redis_mock', None):
            setattr(get_redis_connection, 'redis_mock', mockredis.MockRedis())
        return get_redis_connection.redis_mock
    return django_rq.get_connection()
def rq_job(self):
    """The last RQ Job this ran on."""
    if not self.rq_id or not self.rq_origin:
        return None
    try:
        return RQJob.fetch(self.rq_id, connection=get_connection(self.rq_origin))
    except NoSuchJobError:
        return None
def get(cls, pk):
    redis_conn = django_rq.get_connection()
    class_name = cls.__name__
    lookup_key = '{}::{}'.format(class_name, pk)
    values = redis_conn.hgetall(lookup_key)
    return cls(**values)
def get_job_by_id(job_id: str) -> Optional[Job]:
    """Return the Job for the given id, if it exists."""
    redis_connection = django_rq.get_connection()
    try:
        return Job.fetch(job_id, redis_connection)
    except Exception:
        return None
def pending_or_running_jobs(queue):
    """Check whether there are queued or running jobs on the queue passed as a parameter."""
    rq_queue = get_queue(queue)
    pending = bool(rq_queue.jobs)
    registry = StartedJobRegistry(name=queue, connection=get_connection(queue))
    running = bool(len(registry))
    return pending or running
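A typical use is as a guard against enqueueing duplicate work; a sketch, with `import_data` hypothetical:

from django_rq import get_queue

def enqueue_once():
    # Only enqueue when nothing is already queued or running.
    if not pending_or_running_jobs('default'):
        get_queue('default').enqueue(import_data)  # import_data is hypothetical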
def post(self, request, format=None):
    connection = django_rq.get_connection('high')
    thing = Job.fetch(request.data['job_id'], connection)
    return Response({
        'success': True,
        'content': 'Hello World ' + str(thing.result)
    })
def __init__(self, **kwargs):
    for field, value in kwargs.iteritems():
        setattr(self, field, value)
    self.primary_key_name = self.__class__.Meta.primary_key_name
    if not self.primary_key_name:
        raise ValueError('Meta.primary_key_name is required')
    self.redis_conn = django_rq.get_connection()
def http_client():
    config = settings.REQUESTS_CACHE
    if config.get('backend') == 'redis' and 'connection' not in config:
        config['connection'] = django_rq.get_connection()
    if config.get('backend') == 'sqlite':
        # Create parent directories, if needed
        parent_dir = os.path.dirname(config['cache_name'])
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)
    return requests_cache.CachedSession(**config)
def queryset(self, request, queryset):
    # Not optimal, but readable: the values below could have been computed
    # inside the specific if statements further down, but that would be
    # cumbersome.

    # UGLY hack to get the current job
    # https://github.com/nvie/rq/pull/269
    in_progress_ids = []
    redis_conn = django_rq.get_connection()
    for k in redis_conn.keys():
        try:
            data = unpickle(redis_conn.hget(k, 'data'))
            status = redis_conn.hget(k, 'status')
            if data[0] == 'archives.admin.encode' and status == 'started':
                in_progress_ids = [data[2][0], ]
                break
        except Exception:
            pass

    queue = django_rq.get_queue('default')
    in_queue_ids = [job.args[0] for job in queue.jobs
                    if job.func_name == 'archives.admin.encode']

    failed_queue = django_rq.get_failed_queue('default')
    failed_ids = [job.args[0] for job in failed_queue.jobs
                  if job.func_name == 'archives.admin.encode']

    if self.value() == 'no_file':
        # We can't do file__isnull for the queryset because FileField is
        # represented internally as a CharField, and Django stores non-files
        # as an empty string '' in the database.
        return queryset.filter(file="")
    if self.value() == 'in_queue':
        return queryset.filter(id__in=in_queue_ids)
    if self.value() == 'in_progress':
        return queryset.filter(id__in=in_progress_ids)
    if self.value() == 'failed':
        return queryset.filter(id__in=failed_ids)
    if self.value() == 'encoded':
        encoded = [media.id for media in queryset if media.is_encoded]
        return queryset.exclude(file="").exclude(id__in=in_queue_ids)\
            .exclude(id__in=in_progress_ids)\
            .exclude(id__in=failed_ids).filter(id__in=encoded)
    if self.value() == 'not_encoded':
        not_encoded = [media.id for media in queryset if not media.is_encoded]
        return queryset.exclude(file="").exclude(id__in=in_progress_ids)\
            .exclude(id__in=failed_ids).filter(id__in=not_encoded)\
            .exclude(id__in=in_queue_ids)
def form_valid(self, form):
    redis_conn = django_rq.get_connection('default')
    if len([x for x in Worker.all(connection=redis_conn)
            if settings.DJANGO_TEST_RQ_LOW_QUEUE in x.queue_names()]) == 0:
        messages.add_message(self.request, messages.ERROR, 'No active workers for queue!')
        return HttpResponseRedirect(reverse('long_tasks'))
    form.instance.result = 'QUEUED'
    long_task = form.save()
    long_runnig_task.delay(long_task)
    messages.info(self.request, 'Long task started.')
    return HttpResponseRedirect(reverse('long_tasks'))
def get_context_data(self, **kwargs):
    ctx = super(JobTemplateView, self).get_context_data(**kwargs)
    redis_conn = django_rq.get_connection('default')
    try:
        job = Job.fetch(self.kwargs['job'], connection=redis_conn)
        job = job.__dict__
    except Exception:
        job = None
    ctx['job'] = job
    return ctx
def fetch(self):
    if self.job_id:
        job_id = str(self.job_id)
        if self._enqueued_job:
            self._enqueued_job.refresh()
        else:
            connection = get_connection(self.queue)
            if RqJob.exists(job_id, connection=connection):
                self._enqueued_job = RqJob.fetch(job_id, connection=connection)
    return self._enqueued_job
def append_scan_summary_info(self, ip_addresses):
    if not ip_addresses:
        return
    delta = timezone.now() - datetime.timedelta(days=1)
    for ip_address in ip_addresses:
        if ip_address.scan_summary and ip_address.scan_summary.modified > delta:
            try:
                job = rq.job.Job.fetch(
                    ip_address.scan_summary.job_id,
                    django_rq.get_connection(),
                )
            except rq.exceptions.NoSuchJobError:
                continue
            else:
                ip_address.scan_summary.changed = job.meta.get("changed", False)
def reschedule_all_failed(request):
    from rq import requeue_job
    from rq import get_failed_queue
    from django_rq import get_connection
    queue = get_failed_queue(get_connection())
    for job in queue.jobs:
        requeue_job(job.id, connection=queue.connection)
    return HttpResponse('Success')
def get_queued_job_id_by_number(number: int) -> Optional[str]:
    """Return the job_id when a job for the same number exists."""
    redis_connection = django_rq.get_connection()
    queue = Queue(connection=redis_connection)
    job_ids = (queue.started_job_registry.get_job_ids() +
               queue.scheduled_job_registry.get_job_ids())
    for job_id in job_ids:
        job = JobManager.get_job_by_id(job_id)
        if job and number in job.args:  # guard: the job may have vanished
            return job.id
    return None
def media_pre_delete(sender, instance, *args, **kwargs):
    try:
        conn = django_rq.get_connection()
        job = Job.fetch(instance.get_job_id(), connection=conn)
        if job.get_status() == 'started':
            try:
                send_stop_job_command(conn, instance.get_job_id())
            except InvalidJobOperation:
                pass
        job.delete()
    except NoSuchJobError:
        pass
def __init__(self, message, queue='default', send_func=send):
    self.conn = django_rq.get_connection('default')
    self.send_func = send_func
    self.message = message
    self.limit = message.get_daily_message_limit()
    self.priority = getattr(message, 'priority', message.LOW)
    self.user = message.user
    self.date_string = message.deliver_on.date().strftime("%Y-%m-%d")

    # Since we only queue up messages 24 hours in advance, we can
    # auto-expire any values after a couple of days. This can be a
    # timedelta object.
    self.expire = timedelta(days=2)
def _remove_run_job(sender, instance, using, **kwargs):
    """Signal to delete the job when a ModelRun is deleted."""
    redis_conn = django_rq.get_connection()
    job = Job.fetch(instance.job_id, redis_conn)
    if job.is_started:
        # Kill the job's work-horse pid and wait for it to wind down.
        os.kill(job.meta["workhorse_pid"], signal.SIGTERM)
        while job.get_status() not in [JobStatus.FAILED, JobStatus.FINISHED]:
            sleep(1)
    else:
        # Delete the job from the queue.
        job.delete()
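The handler above would be wired to the model's pre_delete signal, presumably along these lines (ModelRun is assumed from the surrounding project):

from django.db.models.signals import pre_delete

pre_delete.connect(_remove_run_job, sender=ModelRun)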
def all(cls):
    redis_conn = django_rq.get_connection()
    class_name = cls.__name__
    all_items = redis_conn.lrange('{}:all'.format(class_name), 0, -1)
    objs = []
    for item in all_items:
        lookup_key = '{}::{}'.format(class_name, item)
        values = redis_conn.hgetall(lookup_key)
        objs.append(cls(**values))
    return objs
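These `get`/`all` classmethods (and the `__init__` shown earlier) imply a `save()` counterpart writing the same keys; a hypothetical sketch, not from the source, and naive in that re-saving appends a duplicate primary key to the `:all` list:

def save(self):
    # Hypothetical: persist fields to the per-object hash and register the
    # primary key on the class-wide list that all() iterates.
    class_name = self.__class__.__name__
    pk = getattr(self, self.primary_key_name)
    lookup_key = '{}::{}'.format(class_name, pk)
    fields = {f: getattr(self, f) for f in vars(self)
              if f not in ('redis_conn', 'primary_key_name')}
    self.redis_conn.hmset(lookup_key, fields)  # hmset is deprecated in newer redis-py
    self.redis_conn.rpush('{}:all'.format(class_name), pk)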
def run(request, run_id):
    """Details of a single run page."""
    run = ModelRun.objects.get(pk=run_id)
    redis_conn = django_rq.get_connection()
    job = Job.fetch(run.job_id, redis_conn)
    run.job_metadata = job.meta
    metrics = run.metrics.order_by('name').values('name').distinct()
    return render(request, 'main/run_detail.html', {'run': run, 'metrics': metrics})
def _current_file_copied_in_progress(self):
    """Return the current files being copied."""
    _in_progress = []
    redis_conn = django_rq.get_connection()
    for k in redis_conn.keys():
        try:
            data = unpickle(redis_conn.hget(k, 'data'))
            status = redis_conn.hget(k, 'status')
            if data[0] == 'archives.admin.archive' and status == 'started':
                _in_progress = [data[2][0], ]
                break
        except Exception:
            pass
    return _in_progress
def schedules(request):
    # Requires AT LEAST 2 courses, e.g.:
    # inputs = ['CMPUT174', 'ECON281']
    # term = '1690 - Fall Term 2019'
    params = request.query_params.dict()
    term = params["term"]
    inputs = params["courses"]
    job_id = params["job_id"]
    inputs = inputs.split(',')
    print("term: ", term)
    print("courses: ", inputs)

    if job_id:
        print("job already in queue")
        redis_conn = django_rq.get_connection('default')
        job = Job.fetch(job_id, connection=redis_conn)
        print("fetched job")
    else:
        print("create new job")
        job = django_rq.enqueue(entry, term, inputs)
        print("job id: " + job.id)

    if job.result:
        res = job.result
        res_encoded = []
        for courses in res:
            temp = []
            for course in courses:
                course_encoded = jsonpickle.encode(course, unpicklable=False)
                temp.append(ast.literal_eval(course_encoded))
            res_encoded.append(temp)
        data = {
            'status': 'success',
            'data': res_encoded
        }
        return Response(data)

    data = {
        'status': 'processing',
        'job_id': job.id
    }
    return JsonResponse(data)
def status_view(request):
    ret = {}
    job_id = request.GET.get('job_id')
    redis_conn = django_rq.get_connection()
    job = Job.fetch(job_id, redis_conn)
    if job.is_finished:
        ret = {'status': 'finished'}
    elif job.is_queued:
        ret = {'status': 'in-queue'}
    elif job.is_started:
        ret = {'status': 'waiting'}
    elif job.is_failed:
        ret = {'status': 'failed'}
    return HttpResponse(json.dumps(ret), content_type="application/json")
def display_game_results(request, jobid):
    conn = django_rq.get_connection('default')
    try:
        job = Job.fetch(jobid, connection=conn)
        print(f"job ID {jobid} size of result = {total_size(job.result)}")
    except Exception as e:
        return HttpResponse(
            f"{e}<br><br>Game Results are cached for 10 minutes. "
            "Either the game results have expired or there is a "
            "problem with the requested job.")
    if job.result == 1:
        # This implies an error with the gameid.
        return HttpResponse(f"Error: Invalid GameID = {job.args}")
    else:
        return render(request, "gdanalyst/gameresult.html", {"result": job.result})
def queues_status():
    queues = rq.Queue.all(connection=django_rq.get_connection())
    result = {}
    for q in queues:
        jobs = q.get_jobs()
        oldest = None
        state_count = defaultdict(int)
        func_count = defaultdict(int)
        for j in jobs:
            if j.status != 'finished' and (not oldest or j.created_at < oldest):
                oldest = j.created_at

            job_type = j.func_name
            if job_type.endswith('create_frames'):
                if 'analysis.task.key' in j.meta:
                    job_type = "create_frames[%s]" % j.meta.get('analysis.task.key', '?')
                else:
                    job_type = j.get_call_string().replace('stream_analysis.utils.', 'scheduler:')
            elif job_type.endswith('analyze_frame'):
                if 'analysis.task.key' in j.meta:
                    job_type = "analyze_frame[%s]" % j.meta.get('analysis.task.key', '?')
                else:
                    job_type = j.get_call_string()
            else:
                job_type = j.get_call_string()

            func_count[job_type] += 1
            state_count[j.status] += 1

        # TODO: Fix this nasty hack -- rq doesn't use UTC
        if oldest:
            oldest = timezone.make_aware(oldest, timezone.get_default_timezone())

        func_count = sorted(func_count.items())
        result[q.name] = {
            'name': q.name,
            'count': q.count,
            'oldest': oldest,
            'state_count': dict(state_count),
            'func_count': func_count,
        }
    return result
def run_on_worker(self, **kwargs):
    cache = get_cache(self.cache_name)
    if isinstance(cache, DummyCache):
        # No caching or queues with dummy cache.
        data = self._worker_func(**kwargs)
        return (100, data, {}) if self._return_job_meta else (100, data)

    key = _get_cache_key(self.cache_section, **kwargs)
    cached = cache.get(key)
    if cached is not None:
        progress, job_id, data = cached
        connection = django_rq.get_connection(self.queue_name)
        job = Job.fetch(job_id, connection)
        if progress < 100 and job_id is not None:
            if job.is_finished:
                data = job.result
                progress = 100
                cache.set(
                    key,
                    (progress, job_id, data),
                    timeout=self.cache_final_result_timeout,
                )
            elif job.is_failed:
                data = None
                progress = 100
                cache.delete(key)
    else:
        queue = django_rq.get_queue(self.queue_name)
        job = queue.enqueue_call(
            func=self._worker_func,
            kwargs=kwargs,
            timeout=self.work_timeout,
            result_ttl=self.cache_final_result_timeout,
        )
        progress = 0
        data = None
        cache.set(
            key,
            (progress, job.id, data),
            timeout=self.cache_timeout,
        )
    if self._return_job_meta:
        return progress, data, job.meta
    return progress, data
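The method expects its host class to supply `_worker_func` plus the cache and queue attributes; a minimal hypothetical subclass to show the contract (the base class name and `compute_sales_summary` are assumptions):

class SalesReport(CachedWorkerJob):  # base class name is assumed
    cache_name = 'default'
    cache_section = 'sales'
    queue_name = 'default'
    work_timeout = 300
    cache_timeout = 60
    cache_final_result_timeout = 600
    _return_job_meta = False

    @staticmethod
    def _worker_func(**kwargs):
        # Hypothetical heavy computation executed on the worker.
        return compute_sales_summary(**kwargs)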
def _encoding_state(self):
    # No file associated with the media
    if not self.file:
        return ENCODING_NO_FILE

    # The file is currently being processed
    redis_conn = django_rq.get_connection()
    for k in redis_conn.keys():
        try:
            data = unpickle(redis_conn.hget(k, 'data'))
            status = redis_conn.hget(k, 'status')
            if data[0] == 'archives.admin.encode' and status == 'started' and self.id == data[2][0]:
                return ENCODING_IN_PROGRESS
        except Exception:
            pass

    # The file is currently in the queue for the encoding process
    queue = django_rq.get_queue('default')
    for index, job in enumerate(queue.jobs):
        if job.func_name == 'archives.admin.encode':
            if job.args[0] == self.id:
                return ENCODING_IN_QUEUE, index

    # If not, the encoding process should have failed
    failed_queue = django_rq.get_failed_queue('default')
    for job in failed_queue.jobs:
        if job.func_name == 'archives.admin.encode':
            if job.args[0] == self.id:
                return ENCODING_FAILED

    # Or there's no job for this media. So we have two cases: the encoded
    # files are available and the media is encoded, or the files are not
    # available. Since we checked above that the file is neither being
    # processed nor in a 'failed' encoding process, it is just 'not encoded'.
    if self.file:
        # Test if files in the stream repository exist
        if self.is_encoded:
            return ENCODING_ENCODED
        else:
            return ENCODING_NOT_ENCODED
def get_result(request):
    job_id = request.GET.get('job_id')
    redis_conn = django_rq.get_connection()
    job_id = job_id.split(':')[2]
    logger.info(job_id)
    try:
        job = Job.fetch(job_id, redis_conn)
        if job.is_finished:
            ret = job.return_value
        elif job.is_queued:
            ret = {'status': 'in-queue'}
            status = 201
        elif job.is_started:
            ret = {'status': 'waiting'}
        elif job.is_failed:
            ret = {'status': 'failed'}
        return HttpResponse(json.dumps(ret), content_type="application/json")
    except NoSuchJobError:
        ret = {'status': 'No Job Found'}
        return HttpResponseNotFound(json.dumps(ret), content_type="application/json")
def _get_changed_devices_ids(self):
    delta = timezone.now() - datetime.timedelta(days=1)
    ids = set()
    for scan_summary in ScanSummary.objects.filter(modified__gt=delta):
        try:
            job = rq.job.Job.fetch(
                scan_summary.job_id,
                django_rq.get_connection(),
            )
        except rq.exceptions.NoSuchJobError:
            continue
        else:
            if job.meta.get('changed', False):
                for device_id in scan_summary.ipaddress_set.values_list(
                    'device__id', flat=True,
                ):
                    if device_id:
                        ids.add(device_id)
    return sorted(list(ids))
def worker_status():
    workers = rq.Worker.all(connection=django_rq.get_connection())
    worker_data = []
    running = False
    for w in workers:
        if not w.stopped:
            running = True
        worker_data.append({
            'name': w.name,
            'state': w.state,
            'stopped': w.stopped,
            'queues': w.queue_names(),
        })
    result = {
        "workers": worker_data,
        "running": running,
    }
    return result
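One way a helper like this might be consumed is a health-check endpoint that returns 503 when no worker is alive; a sketch (the view name and URL wiring are assumptions):

from django.http import JsonResponse

def worker_health(request):
    # Return the worker summary; 503 signals "no workers running" to monitors.
    status = worker_status()
    return JsonResponse(status, status=200 if status['running'] else 503)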
def retrieve(self, request, pk=None, format=None):
    """Get all details for a run.

    Arguments:
        request {Django request} -- The request object

    Keyword Arguments:
        pk {string} -- Id of the run
        format {string} -- Output format to use (default: {None})

    Returns:
        Json -- Object containing all metrics for the pod
    """
    run = ModelRun.objects.get(pk=pk)
    redis_conn = django_rq.get_connection()
    job = Job.fetch(run.job_id, redis_conn)
    run.job_metadata = job.meta
    serializer = ModelRunSerializer(run, many=False)
    return Response(serializer.data, status=status.HTTP_200_OK)
def _get_cached(self, **kwargs):
    cache = get_cache(CACHE_NAME)
    if isinstance(cache, DummyCache):
        # No caching or queues with dummy cache.
        header, data = self._get_header_and_data(**kwargs)
        return 100, header, data

    key = _get_cache_key(self.section, **kwargs)
    cached = cache.get(key)
    if cached is not None:
        progress, job_id, header, data = cached
        if progress < 100 and job_id is not None and QUEUE_NAME:
            connection = django_rq.get_connection(QUEUE_NAME)
            job = Job.fetch(job_id, connection)
            if job.is_finished:
                header, data = job.result
                progress = 100
                cache.set(key, (progress, job_id, header, data))
            elif job.is_failed:
                header, data = None, None
                progress = 100
                cache.delete(key)
    else:
        if QUEUE_NAME:
            queue = django_rq.get_queue(QUEUE_NAME)
            job = queue.enqueue_call(
                func=self._get_header_and_data,
                kwargs=kwargs,
                timeout=TIMEOUT,
            )
            progress = 0
            header = None
            data = None
            cache.set(key, (progress, job.id, header, data))
        else:
            progress = 0
            cache.set(key, (progress, None, None, None))
            header, data = self._get_header_and_data(**kwargs)
            progress = 100
            cache.set(key, (progress, None, header, data))
    return progress, header or [], data or []
def run_eregs_command(eregs_args):
    """Run `eregs *eregs_args`, capturing all of the logs and storing them in
    Redis."""
    log = StringIO()
    logger = logging.getLogger('regparser')
    log_handler = logging.StreamHandler(log)
    logger.propagate = False
    logger.addHandler(log_handler)
    try:
        context = eregs.cli.make_context('eregs', args=list(eregs_args))
        eregs.cli.invoke(context)
    finally:
        log_handler.flush()
        # Recreating the connection due to a bug in rq:
        # https://github.com/nvie/rq/issues/479
        conn = django_rq.get_connection()
        job = get_current_job(conn)
        job.meta['logs'] = log.getvalue()
        job.save()
        logger.removeHandler(log_handler)
def show_stats(write_fn):
    """Print some metrics to stdout."""
    queue = django_rq.get_queue()
    conn = django_rq.get_connection()

    write_fn("Queued:")
    for job in queue.jobs:
        _print(write_fn, job)

    write_fn("Started:")
    for job_id in registry.StartedJobRegistry(connection=conn).get_job_ids():
        _print(write_fn, queue.fetch_job(job_id))

    write_fn("Finished:")
    for job_id in registry.FinishedJobRegistry(connection=conn).get_job_ids():
        _print(write_fn, queue.fetch_job(job_id))

    write_fn("Failed:")
    for job in FailedQueue(connection=conn).jobs:
        _print(write_fn, job)
        for line in job.exc_info.split('\n'):
            write_fn("\t\t" + line)
def handle(self, *args, **options):
    redis_conn = django_rq.get_connection('default')
    q = Queue(settings.DJANGO_TEST_RQ_LOW_QUEUE, connection=redis_conn)
    worker = Worker([q], exc_handler=my_handler, connection=redis_conn)
    worker.work()
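`my_handler` is referenced but not shown; rq custom exception handlers take `(job, exc_type, exc_value, traceback)` and return a truthy value to let the next handler in the chain run. A minimal sketch of what it might look like:

def my_handler(job, exc_type, exc_value, traceback):
    # Record the failure on the job's meta, then fall through to the
    # default handler (returning True chains to the next handler).
    job.meta['failure'] = str(exc_value)
    job.save_meta()
    return True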
def __init__(self, **kwargs):
    self.connection = django_rq.get_connection(self.QUEUE_NAME)
    self.queue = django_rq.get_queue(self.QUEUE_NAME)
    super(Report, self).__init__(**kwargs)
}

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_rq',
    'django_pandas',
)

import django_rq
redis_conn = django_rq.get_connection('high')

MIDDLEWARE_CLASSES = (
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'testex.urls'
WSGI_APPLICATION = 'testex.wsgi.application'
def _scan_postprocessing(results, job, ip_address=None):
    """
    Postprocessing calculates checksums on scan results and maintains the
    related RQ jobs.
    """
    if any((
        'messages' not in job.meta,
        'finished' not in job.meta,
        'status' not in job.meta,
    )):
        job.meta['messages'] = []
        job.meta['finished'] = []
        job.meta['status'] = {}
        job.save()

    # get connected ip_address
    if ip_address:
        ip_address, created = IPAddress.concurrent_get_or_create(
            address=ip_address,
        )
    else:
        ip_addresses = _get_ip_addresses_from_results(results)
        try:
            ip_address = ip_addresses[0]
        except IndexError:
            return

    # get (and update) or create scan_summary
    old_job = None
    if ip_address.scan_summary:
        scan_summary = ip_address.scan_summary
        try:
            old_job = rq.job.Job.fetch(
                scan_summary.job_id,
                django_rq.get_connection(),
            )
        except rq.exceptions.NoSuchJobError:
            pass
        else:
            if 'messages' in old_job.meta and not job.meta['messages']:
                job.meta['messages'] = old_job.meta['messages']
            for plugin in old_job.meta.get('finished', []):
                if plugin not in job.meta['finished']:
                    job.meta['finished'].append(plugin)
            for plugin, status in old_job.meta.get('status', {}).iteritems():
                if plugin not in job.meta['status']:
                    job.meta['status'][plugin] = status
            job.save()
        scan_summary.job_id = job.id
    else:
        scan_summary, created = ScanSummary.concurrent_get_or_create(
            job_id=job.id,
        )
        ip_address.scan_summary = scan_summary

    # update existing results data
    if old_job:
        updated_results = old_job.result
        if updated_results is not None:
            for plugin_name, plugin_results in results.iteritems():
                updated_results[plugin_name] = plugin_results
                if plugin_name not in job.meta['finished']:
                    job.meta['finished'].append(plugin_name)
                if plugin_name not in job.meta['status']:
                    job.meta['status'][plugin_name] = plugin_results['status']
            job.save()
            results.update(updated_results)

    # calculate new checksum
    cleaned_results = _get_cleaned_results(results)
    checksum = _get_results_checksum(cleaned_results)
    job.meta['results_checksum'] = checksum
    job.save()

    # calculate new status
    if all((
        checksum != scan_summary.previous_checksum,
        checksum != scan_summary.false_positive_checksum,
    )):
        job.meta['changed'] = True
    else:
        job.meta['changed'] = False
        scan_summary.false_positive_checksum = None
    job.save()

    scan_summary.save()
    ip_address.save()

    # cancel old job (if it exists)
    if old_job:
        rq.cancel_job(old_job.id, django_rq.get_connection())