def form_valid(self, form):
    form.save()
    action = form.cleaned_data['action']
    if action == 'cancel_selected':
        # django forms don't support "getlist"
        job_ids = self.request.POST.getlist('_selected_action')
        for job_id in job_ids:
            cancel_job(job_id, self.connection)
        # this has the side effect of flushing the canceled jobs,
        # otherwise on redirect you may get an empty list..
        form.queue.get_jobs(0, len(job_ids))
    elif action == 'requeue_selected':
        job_ids = self.request.POST.getlist('_selected_action')
        for job_id in job_ids:
            requeue_job(job_id, self.connection)
        form.queue.get_jobs(0, len(job_ids))
    msgs = {
        'compact': __('Queue compacted'),
        'empty': __('Queue emptied'),
        'requeue': __('Jobs requeued'),
        'cancel_selected': __('Selected jobs canceled'),
        'requeue_selected': __('Selected jobs requeued'),
    }
    messages.success(self.request, msgs[action])
    return super(QueueDetails, self).form_valid(form)

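# The view above relies on the legacy top-level helpers ``rq.cancel_job`` and
# ``rq.requeue_job``, which were removed in modern RQ. A minimal sketch of the
# same bulk actions against RQ >= 1.x, with explicit connections (the function
# names here are illustrative, not part of the original code):
from rq.job import Job

def bulk_cancel(job_ids, connection):
    # Job.cancel() removes the job from its queue without deleting its data.
    for job_id in job_ids:
        Job.fetch(job_id, connection=connection).cancel()

def bulk_requeue(job_ids, queue):
    # In modern RQ, failed jobs live in the queue's FailedJobRegistry.
    registry = queue.failed_job_registry
    for job_id in job_ids:
        registry.requeue(job_id)
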
def save(self):
    action = self.cleaned_data['action']
    if action == 'requeue':
        requeue_job(self.job.id, connection=self.job.connection)
    elif action == 'cancel':
        cancel_job(self.job.id, connection=self.job.connection)

def api_rebuild():
    """Rebuild the site (internally)."""
    if db is None:
        return '{"error": "single-user mode"}'
    build_job = q.fetch_job('build')
    orphans_job = q.fetch_job('orphans')
    if not build_job and not orphans_job:
        build_job = q.enqueue_call(
            func=coil.tasks.build,
            args=(app.config['REDIS_URL'], app.config['NIKOLA_ROOT'], ''),
            job_id='build')
        orphans_job = q.enqueue_call(
            func=coil.tasks.orphans,
            args=(app.config['REDIS_URL'], app.config['NIKOLA_ROOT']),
            job_id='orphans',
            depends_on=build_job)
    d = json.dumps({'build': build_job.meta, 'orphans': orphans_job.meta})
    if ('status' in build_job.meta and build_job.meta['status'] is not None
            and 'status' in orphans_job.meta
            and orphans_job.meta['status'] is not None):
        rq.cancel_job('build', db)
        rq.cancel_job('orphans', db)
        db.set('site:needs_rebuild', '0')
        site.coil_needs_rebuild = '1'
    return d

def api_render(slug):
    """Render the page (internally)."""
    r1_job = q.fetch_job('{0}.r1'.format(slug))
    r2_job = q.fetch_job('{0}.r2'.format(slug))
    if not r1_job and not r2_job:
        r1_job = q.enqueue_call(
            func=render_task,
            args=(app.config['REDIS_URL'], app.config['DOCPATH'], slug),
            job_id='{0}.r1'.format(slug))
        r2_job = q.enqueue_call(
            func=render_task,
            args=(app.config['REDIS_URL'], app.config['DOCPATH'], slug),
            job_id='{0}.r2'.format(slug),
            depends_on=r1_job)
    d = json.dumps({'1': r1_job.meta, '2': r2_job.meta})
    if ('status' in r1_job.meta and r1_job.meta['status'] is not None
            and 'status' in r2_job.meta
            and r2_job.meta['status'] is not None):
        # cancel this slug's render jobs once both have reported a status
        rq.cancel_job('{0}.r1'.format(slug), redisdb)
        rq.cancel_job('{0}.r2'.format(slug), redisdb)
    return d

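# Both API views above chain jobs with ``enqueue_call(..., depends_on=...)``,
# so the second job only starts once the first finishes successfully. A
# minimal, self-contained sketch of that pattern (queue name, job ids, and
# task functions are illustrative; real task functions must be importable by
# the worker):
from redis import Redis
from rq import Queue

def count_words(text):
    return len(text.split())

def report(n):
    print('word count ready:', n)

q = Queue('render', connection=Redis())
first = q.enqueue_call(func=count_words, args=('hello world',),
                       job_id='demo.r1')
second = q.enqueue_call(func=report, args=(2,),
                        job_id='demo.r2', depends_on=first)
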
def get_rqinfo(options):
    """Fetch RQ queue information."""
    redis_conn = Redis.from_url(options.connection)
    push_connection(redis_conn)
    # dispatch on the requested RQ operation
    try:
        if options.status:
            workers = Worker.all()
            queues = Queue.all()
            return workers, queues
        if options.queue:
            return Queue(options.queue)
        if options.cancel_job:
            cancel_job(options.cancel_job)
            return 'OK'
        if options.requeue_job:
            requeue_job(options.requeue_job)
            return 'OK'
        if options.requeue_all:
            return requeue_all()
        if options.empty_queue:
            empty_queue(options.empty_queue)
            return 'OK'
        if options.compact_queue:
            compact_queue(options.compact_queue)
            return 'OK'
        if options.queues:
            return list_queues()
        if options.jobs:
            return list_jobs(options.jobs)
        if options.workers:
            return list_workers()
    finally:
        # the early returns above would otherwise leak the pushed connection
        pop_connection()

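# A usage sketch for get_rqinfo, assuming an argparse-style namespace; the
# attribute names mirror exactly the options the function reads, and the
# Redis URL is illustrative:
import argparse

opts = argparse.Namespace(
    connection='redis://localhost:6379/0',
    status=True, queue=None, cancel_job=None, requeue_job=None,
    requeue_all=False, empty_queue=None, compact_queue=None,
    queues=False, jobs=None, workers=False,
)
workers, queues = get_rqinfo(opts)
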
def delete(self, *args, **kwargs):
    if self.job:
        try:
            cancel_job(self.job)
        except Exception:
            # the job may already be gone; deletion should proceed anyway
            pass
    super().delete(*args, **kwargs)

def cancel_optimization(self, request, public_id=None):
    transport_network_obj = self.get_object()
    if transport_network_obj.optimization_status in [
            TransportNetwork.STATUS_ERROR, TransportNetwork.STATUS_FINISHED]:
        raise ValidationError('Optimization is not running or queued')
    redis_conn = get_connection()
    workers = Worker.all(redis_conn)
    for worker in workers:
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == str(transport_network_obj.job_id):
            send_kill_horse_command(redis_conn, worker.name)
    # remove from queue
    cancel_job(str(transport_network_obj.job_id), connection=redis_conn)
    transport_network_obj.optimization_status = None
    transport_network_obj.optimization_ran_at = None
    transport_network_obj.optimization_error_message = None
    transport_network_obj.save()
    return Response(TransportNetworkSerializer(transport_network_obj).data,
                    status.HTTP_200_OK)

def cancel_job(self, job_id):
    with Connection(self.redis_conn):
        try:
            cancel_job(job_id)
            status = True
        except Exception:
            status = False
    return status

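# The wrapper above collapses any failure into a boolean so callers can
# branch without importing RQ's exceptions. A self-contained sketch of the
# same idea, assuming the legacy ``rq.cancel_job`` helper used throughout
# these snippets (function name and job id are illustrative):
from redis import Redis
from rq import Connection, cancel_job

def try_cancel(job_id, redis_conn):
    with Connection(redis_conn):
        try:
            cancel_job(job_id)
            return True
        except Exception:
            return False

if not try_cancel('abc123', Redis()):
    print('job abc123 could not be cancelled')
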
def pidis_killer():
    redis_conn = Redis()
    use_connection(redis_conn)
    q = Queue('high', connection=redis_conn)
    # capture the ids before emptying, otherwise there is nothing left to cancel
    jobs = q.job_ids
    q.empty()
    for j in jobs:
        cancel_job(j)
    killZombies()

def cancel_test(markus_address, run_ids, **kw):
    """
    Cancel the test run jobs whose ids are derived from markus_address
    and each run_id.
    """
    with rq.Connection(ats.redis_connection()):
        for run_id in run_ids:
            job_id = format_job_id(markus_address, run_id)
            rq.cancel_job(job_id)

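# ``format_job_id`` is defined elsewhere in that project; a plausible
# (hypothetical) implementation that makes the pattern explicit — deterministic
# job ids let the canceller reconstruct an id from the same inputs used at
# enqueue time, without storing it anywhere:
def format_job_id(markus_address, run_id):
    return '{}_{}'.format(markus_address, run_id)
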
def cancel_all(queue_name):
    queue = Queue(queue_name)
    count = 0
    for job_id in queue.get_job_ids():
        if Job.exists(job_id, queue.connection):
            cancel_job(job_id)
            count += 1
    return dict(status='OK', count=count)

def job(self, job_id, queue_name, action):
    if action == 'cancel':
        cancel_job(job_id)
    elif action == 'requeue':
        requeue_job(job_id)
    else:
        raise HTTP(404)
    if current.request.ajax:
        return jobs_table(queue_name)
    redirect(URL())

def cancel_job_view(job_id):
    job = Job.fetch(job_id)
    if job.is_queued:
        if current_app.config.get('RQ_DASHBOARD_DELETE_JOBS', False):
            job.delete()
        else:
            cancel_job(job_id)
    else:
        job.delete()
    return dict(status='OK')

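# The view above distinguishes cancelling from deleting: in RQ, cancelling
# removes a queued job from its queue but keeps the job hash in Redis, while
# delete() removes the hash itself. A minimal sketch of that difference
# (connection and payload are illustrative):
from redis import Redis
from rq import Queue
from rq.job import Job

conn = Redis()
job = Queue(connection=conn).enqueue(print, 'hi')
job.cancel()                       # no longer runnable, data still in Redis
assert Job.exists(job.id, conn)
job.delete()                       # now the job hash is gone too
assert not Job.exists(job.id, conn)
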
def cancel_job_view(job_id):
    if current_app.config.get('RQ_DASHBOARD_DELETE_JOBS', False):
        Job.fetch(job_id).delete()
    else:
        sq = SchedulerQueue()
        scheduled_jobs = sq.get_job_ids()
        if job_id in scheduled_jobs:
            sq.connection.zrem(
                '{}scheduled_jobs'.format(sq.scheduler_queue_namespace_prefix),
                job_id)
        else:
            cancel_job(job_id)
    return dict(status='OK')

def delete_run(run_id, format=None):
    if format is None:
        format = 'html'
    run = db.Run.find_one({'_id': run_id})
    cancel_job(run.task_id)
    run.delete()
    if format == 'json':
        return jsonify({'status': 'success'})
    else:
        flash("Run deleted")
        return redirect(url_for('runs'))

def delete_dataset(dataset_id, format=None):
    if format is None:
        format = 'html'
    dataset = db.Dataset.find_one({'_id': dataset_id})
    cancel_job(dataset.task_id)
    dataset.delete()
    if format == 'json':
        return jsonify({'status': 'success'})
    else:
        flash("Dataset deleted")
        return redirect(url_for('datasets'))

def delete_shoreline(shoreline_id, format=None):
    if format is None:
        format = 'html'
    shoreline = db.Shoreline.find_one({'_id': shoreline_id})
    cancel_job(shoreline.task_id)
    shoreline.delete()
    if format == 'json':
        return jsonify({'status': 'success'})
    else:
        flash("Shoreline deleted")
        return redirect(url_for('shorelines'))

def delete_job(job_id):
    redis_conn = get_connection()
    workers = Worker.all(redis_conn)
    for worker in workers:
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == str(job_id):
            send_kill_horse_command(redis_conn, worker.name)
    try:
        # remove from queue
        cancel_job(str(job_id), connection=redis_conn)
    except NoSuchJobError:
        pass

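# The kill-horse pattern above is how RQ stops a job that is already
# executing: cancel_job only removes queued jobs, so a busy worker's horse
# process must be killed separately. A minimal sketch of the real commands
# involved (the job id 'job-42' is illustrative):
from redis import Redis
from rq.command import send_kill_horse_command
from rq.worker import Worker, WorkerStatus

conn = Redis()
for worker in Worker.all(conn):
    if (worker.state == WorkerStatus.BUSY
            and worker.get_current_job_id() == 'job-42'):
        send_kill_horse_command(conn, worker.name)  # stops the running job
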
def index():
    form = InputForm()
    sessionJobInit()  # can we avoid this?
    if form.validate_on_submit():
        if app.debug:
            flash('Generating poem with title %s at drunkenness %s'
                  % (form.titleSeed.data, str(form.howDrunk.data)))
            # return redirect('/')
        if session['job']:
            cancel_job(session['job'], connection=conn)
        job = q.enqueue_call(func=generate_text,
                             args=(form.titleSeed.data, form.howDrunk.data),
                             result_ttl=300, timeout=6000)
        session['job'] = job.get_id()
        print(job.get_id())  # e.g. 66df343f-2841-4fd2-986d-b83d459a6693
        return render_template('reading.html', pageTitle=None, form=form,
                               poemTitle=form.titleSeed.data,
                               poemHowDrunk=str(form.howDrunk.data),
                               poemContent='BIG ASS T**S',
                               jobId=job.get_id())
    return render_template('reading.html', pageTitle=None, form=form, putput='')

def delete_run():
    runs = db.query_all_runs(user_id=session['user_id'])
    run_id = int(runs[int(request.form['index']) - 1]['id'])
    # Cancel the run's jobs if they are still waiting in the queue
    ids = db.query_get_job_ids(run_id)
    for job_id in ids:
        if job_id in current_app.task_queue.job_ids:
            cancel_job(job_id=job_id, connection=current_app.redis)
    # Delete all files associated with the run and set live = 0 in the
    # database (which cancels an in-process run at its next checkpoint)
    db.clean_run(run_id=run_id)
    username, title = db.query_username_title(run_id=run_id)
    logger.info('User #{} ({}) deleted Run #{} ({})'.format(
        session['user_id'], username, run_id, title))
    return ''

def _cancel_job(self, job_id):
    event = self._jobs.get(job_id, None)
    # The job is in the scheduler
    if event:
        try:
            self._scheduler.cancel(event)
            del self._jobs[job_id]
            logger.debug("Event found for #%s; canceling it", job_id)
            return
        except ValueError:
            logger.debug("Event not found for #%s; it should be on the queue",
                         job_id)
    # The job is running on a queue
    rq.cancel_job(job_id, connection=self.conn)
    logger.debug("Job #%s canceled", job_id)

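# ``sched.scheduler.cancel`` raises ValueError when the event is no longer in
# its queue, which is how the method above detects that the job has already
# moved on to RQ. A tiny stdlib-only demonstration of that contract:
import sched
import time

s = sched.scheduler(time.time, time.sleep)
event = s.enter(60, 1, print, ('later',))
s.cancel(event)          # ok: the event was still pending
try:
    s.cancel(event)      # second cancel: the event is already gone
except ValueError:
    print('event not in scheduler queue')
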
def cancel_job(self, name):
    print("Cancelling job %s ..." % name)
    jobs = self.get_jobs()
    name_list = [x['name'] for x in jobs]
    try:
        # existence check only; the authoritative status comes from RQ below
        name_list.index(str(name))
    except ValueError:
        print("There is no job named %s" % name)
        return False
    job = Job.fetch(id=name, connection=self.conn)
    status = job.get_status()
    if status == "finished" or status == "failed":
        print("Cannot cancel a finished/failed job")
        return False
    else:
        cancel_job(name, connection=self.conn)
        self.change_status(name, 'canceled')
        print("Job %s canceled..." % name)
        return True

def pre_delete_job_enqueueings():
    job_id = request.view_args['job_id']
    job = get_jobs_db().find_one({'_id': ObjectId(job_id)})
    if job and job['status'] in CANCELLABLE_JOB_STATUSES:
        # Cancel the job from RQ
        cancel_job(job_id, connection=ghost.ghost_redis_connection)
        get_jobs_db().update({'_id': ObjectId(job_id)}, {
            '$set': {
                'status': 'cancelled',
                'message': 'Job cancelled',
                '_updated': datetime.now(),
            }
        })
        return
    # Do not allow cancelling jobs not in init status
    abort(422, description="Cancelling a job not in init status is not allowed")

def do_GET(self):
    from werkzeug.routing import Map, Rule
    from werkzeug.exceptions import NotFound, MethodNotAllowed
    from oorq.oorq import setup_redis_connection
    from jinja2 import Template
    from rq import cancel_job
    from rq.job import Job
    from rq.exceptions import NoSuchJobError
    import times

    setup_redis_connection()
    m = Map([
        Rule('/job/<string:job>', endpoint='job'),
        Rule('/job/<string:job>/download', endpoint='download'),
        Rule('/job/<string:job>/cancel', endpoint='cancel'),
    ])
    urls = m.bind('')
    try:
        endpoint, params = urls.match(self.path)
        job = Job.fetch(params['job'])
        self.send_response(200)
        if endpoint in ('job', 'cancel'):
            running_time = times.now() - job.enqueued_at
            if endpoint == 'cancel':
                cancel_job(job.id)
            self.send_header('Content-Type', 'text/html')
            self.end_headers()
            content = Template(get_template('jobs.html'))
            self.wfile.write(content.render(job=job, rt=running_time))
        elif endpoint == 'download' and job.status == 'finished':
            self.send_header('Content-Type',
                             'application/%s' % job.meta['format'])
            self.send_header('Content-Length', len(job.result[0]))
            self.send_header('Content-Disposition', 'attachment;'
                             'filename=report.%s' % job.meta['format'])
            self.end_headers()
            self.wfile.write(job.result[0])
    except (NotFound, NoSuchJobError):
        self.send_response(404)
        self.end_headers()
    except MethodNotAllowed:
        self.send_response(405)
        self.end_headers()

def cancel(self, task_id):
    with Connection(redis.from_url(self.url)):
        cancel_job(task_id)

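# ``with Connection(...)`` pushes a connection onto RQ's global stack so that
# the bare ``cancel_job(task_id)`` call resolves it implicitly; recent RQ
# deprecates that stack in favour of passing connections explicitly. An
# equivalent explicit sketch (function name is illustrative):
import redis
from rq.job import Job

def cancel_explicit(url, task_id):
    conn = redis.from_url(url)
    Job.fetch(task_id, connection=conn).cancel()
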
def cancel(self, cursor, uid, ids, context=None):
    if not context:
        context = {}
    if 'jid' in context:
        cancel_job(context['jid'])
    return True

def delete_task_from_queue(job_id):
    from rq import cancel_job
    from rq import Connection
    with Connection():
        cancel_job(job_id)

def _scan_postprocessing(results, job, ip_address=None):
    """
    Postprocessing: calculate checksums on the scan results and do RQ job
    maintenance.
    """
    if any((
        'messages' not in job.meta,
        'finished' not in job.meta,
        'status' not in job.meta,
    )):
        job.meta['messages'] = []
        job.meta['finished'] = []
        job.meta['status'] = {}
        job.save()
    # get connected ip_address
    if ip_address:
        ip_address, created = IPAddress.concurrent_get_or_create(
            address=ip_address,
        )
    else:
        ip_addresses = _get_ip_addresses_from_results(results)
        try:
            ip_address = ip_addresses[0]
        except IndexError:
            return
    # get (and update) or create scan_summary
    old_job = None
    if ip_address.scan_summary:
        scan_summary = ip_address.scan_summary
        try:
            old_job = rq.job.Job.fetch(
                scan_summary.job_id,
                django_rq.get_connection(),
            )
        except rq.exceptions.NoSuchJobError:
            pass
        else:
            if 'messages' in old_job.meta and not job.meta['messages']:
                job.meta['messages'] = old_job.meta['messages']
            for plugin in old_job.meta.get('finished', []):
                if plugin not in job.meta['finished']:
                    job.meta['finished'].append(plugin)
            for plugin, status in old_job.meta.get('status', {}).iteritems():
                if plugin not in job.meta['status']:
                    job.meta['status'][plugin] = status
            job.save()
        scan_summary.job_id = job.id
    else:
        scan_summary, created = ScanSummary.concurrent_get_or_create(
            job_id=job.id,
        )
        ip_address.scan_summary = scan_summary
    # update existing results data
    if old_job:
        updated_results = old_job.result
        if updated_results is not None:
            for plugin_name, plugin_results in results.iteritems():
                updated_results[plugin_name] = plugin_results
                if plugin_name not in job.meta['finished']:
                    job.meta['finished'].append(plugin_name)
                if plugin_name not in job.meta['status']:
                    job.meta['status'][plugin_name] = plugin_results['status']
            job.save()
            results.update(updated_results)
    # calculate new checksum
    cleaned_results = _get_cleaned_results(results)
    checksum = _get_results_checksum(cleaned_results)
    job.meta['results_checksum'] = checksum
    job.save()
    # calculate new status
    if all((
        checksum != scan_summary.previous_checksum,
        checksum != scan_summary.false_positive_checksum,
    )):
        job.meta['changed'] = True
    else:
        job.meta['changed'] = False
        scan_summary.false_positive_checksum = None
    job.save()
    scan_summary.save()
    ip_address.save()
    # cancel old job (if exists)
    if old_job:
        rq.cancel_job(old_job.id, django_rq.get_connection())

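# ``_scan_postprocessing`` leans heavily on ``job.meta`` as a scratch space
# that survives across fetches; the contract is that mutations only become
# visible to other readers after ``job.save()`` (or ``job.save_meta()`` in
# newer RQ). A minimal round-trip sketch (connection and payload are
# illustrative):
from redis import Redis
from rq import Queue
from rq.job import Job

conn = Redis()
job = Queue(connection=conn).enqueue(print, 'scan')
job.meta['finished'] = ['plugin_a']
job.save_meta()                        # persist meta to Redis

same_job = Job.fetch(job.id, connection=conn)
assert same_job.meta['finished'] == ['plugin_a']
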
def save(self):
    action = self.cleaned_data['action']
    if action == 'requeue':
        requeue_job(self.job.id, connection=self.job.connection)
    elif action == 'cancel':
        cancel_job(self.job.id, connection=self.job.connection)

def cancel_job_view(job_id):
    if current_app.config.get('RQ_DASHBOARD_DELETE_JOBS'):
        Job.fetch(job_id).delete()
    else:
        cancel_job(job_id)
    return dict(status='OK')

def cancel_all_job_view(queue_name, state=None):
    for job_id in get_all_job_ids(queue_name, state):
        cancel_job(job_id)
    return dict(status='OK')

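# ``get_all_job_ids`` is a helper from that dashboard; a plausible
# (hypothetical) version that walks the queue plus RQ's per-state registries,
# using only documented registry classes:
from rq import Queue
from rq.registry import (DeferredJobRegistry, FailedJobRegistry,
                         ScheduledJobRegistry, StartedJobRegistry)

def get_all_job_ids(queue_name, state=None, connection=None):
    queue = Queue(queue_name, connection=connection)
    sources = {
        'queued': queue,
        'started': StartedJobRegistry(queue=queue),
        'deferred': DeferredJobRegistry(queue=queue),
        'failed': FailedJobRegistry(queue=queue),
        'scheduled': ScheduledJobRegistry(queue=queue),
    }
    if state:
        return sources[state].get_job_ids()
    return [jid for src in sources.values() for jid in src.get_job_ids()]
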
def cancel():
    if 'job' in request.args:
        cancel_job(request.args['job'], connection=Redis())
    return redirect('/kols')

def cancel_rq_job(job_id):
    assert job_id
    cancel_job(job_id)
    return dict(status='OK')

def cancel(self, job_id):
    with Connection(self.conn):
        cancel_job(job_id)

def cancel_job_view(job_id):
    cancel_job(job_id)
    return dict(status='OK')

def cancel_job_view(job_id):
    if current_app.config.get('DELETE_JOBS'):
        Job.fetch(job_id).delete()
    else:
        cancel_job(job_id)
    return dict(status='OK')