def run_job(job_id):
    job = fetch('Job', id=job_id)
    # Refuse to start a job that is already running.
    if job.status == 'Running':
        return {'error': 'Job is already running.'}
    targets = job.compute_targets()
    if hasattr(job, 'has_targets'):
        if job.has_targets and not targets:
            return {'error': 'Set devices or pools as targets first.'}
        if not job.has_targets and targets:
            return {
                'error': 'This service should not have targets configured.'
            }
    if current_app.config['CLUSTER']:
        # Cluster mode: delegate the run to the REST API.
        rest_post(
            'http://0.0.0.0/rest/run_job',
            json={'name': job.name},
            auth=HTTPBasicAuth(current_user.name, current_user.password),
        )
    else:
        # Standalone mode: schedule the job locally for immediate execution.
        scheduler.add_job(
            id=str(datetime.now()),
            func=scheduler_job,
            run_date=datetime.now(),
            args=[job.id],
            trigger='date',
        )
    return job.serialized
def post(self) -> Union[str, dict]:
    payload = request.get_json()
    job = fetch("Job", name=payload["name"])
    handle_asynchronously = payload.get("async", False)
    try:
        # Resolve the target devices from names, IP addresses and pools.
        targets = {
            fetch("Device", name=device_name)
            for device_name in payload.get("devices", "")
        } | {
            fetch("Device", ip_address=ip_address)
            for ip_address in payload.get("ip_addresses", "")
        }
        for pool_name in payload.get("pools", ""):
            targets |= {d for d in fetch("Pool", name=pool_name).devices}
    except Exception as e:
        info(f"REST API run_job endpoint failed ({str(e)})")
        return str(e)
    if handle_asynchronously:
        # Schedule the job for immediate execution and return right away.
        scheduler.add_job(
            id=str(datetime.now()),
            func=scheduler_job,
            run_date=datetime.now(),
            args=[job.id, None, [d.id for d in targets]],
            trigger="date",
        )
        return job.serialized
    else:
        # Run synchronously and return the job results.
        return job.try_run(targets=targets)[0]
def post(self):
    payload = request.get_json()
    job = fetch('Job', name=payload['name'])
    handle_asynchronously = payload.get('async', True)
    try:
        targets = {
            fetch('Device', name=device_name)
            for device_name in payload.get('devices', '')
        } | {
            fetch('Device', ip_address=ip_address)
            for ip_address in payload.get('ip_addresses', '')
        }
        for pool_name in payload.get('pools', ''):
            targets |= {d for d in fetch('Pool', name=pool_name).devices}
    except Exception as e:
        info(f'REST API run_job endpoint failed ({str(e)})')
        return str(e)
    if handle_asynchronously:
        scheduler.add_job(
            id=str(datetime.now()),
            func=scheduler_job,
            run_date=datetime.now(),
            args=[job.id, None, [d.id for d in targets]],
            trigger='date',
        )
        return job.serialized
    else:
        return job.try_run(targets=targets)[0]
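# Example client call for the endpoint above (an illustrative sketch, not part
# of the original code): the server address, credentials and payload values
# are assumptions; only the route seen elsewhere in this code ('/rest/run_job')
# and the JSON keys read by the handler ('name', 'devices', 'ip_addresses',
# 'pools', 'async') come from the snippets themselves.
from requests import post
from requests.auth import HTTPBasicAuth

response = post(
    'http://127.0.0.1:5000/rest/run_job',  # assumed server address and port
    json={
        'name': 'my_job',                   # job to run, looked up by name
        'devices': ['router1'],             # optional: target device names
        'ip_addresses': ['10.0.0.1'],       # optional: target IP addresses
        'pools': ['core'],                  # optional: target pool names
        'async': True,                      # schedule instead of running inline
    },
    auth=HTTPBasicAuth('admin', 'admin'),   # assumed credentials
)
print(response.json())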
def run_job(job_id):
    job = fetch('Job', id=job_id)
    if job.status == 'Running':
        return {'error': 'Job is already running.'}
    targets = job.compute_targets()
    if hasattr(job, 'has_targets'):
        if job.has_targets and not targets:
            return {'error': 'Set devices or pools as targets first.'}
        if not job.has_targets and targets:
            return {
                'error': 'This service should not have targets configured.'
            }
    scheduler.add_job(
        id=str(datetime.now()),
        func=scheduler_job,
        run_date=datetime.now(),
        args=[job.id],
        trigger='date',
    )
    return job.serialized
def run_job(job_id: int) -> dict:
    job = fetch("Job", id=job_id)
    if job.status == "Running":
        return {"error": "Job is already running."}
    targets = job.compute_targets()
    if hasattr(job, "has_targets"):
        if job.has_targets and not targets:
            return {"error": "Set devices or pools as targets first."}
        if not job.has_targets and targets:
            return {
                "error": "This service should not have targets configured."
            }
    scheduler.add_job(
        id=str(datetime.now()),
        func=scheduler_job,
        run_date=datetime.now(),
        args=[job.id],
        trigger="date",
    )
    return job.serialized
def post(self):
    payload = request.get_json()
    job = fetch('Job', name=payload['name'])
    handle_asynchronously = payload.get('async', True)
    targets = {
        fetch('Device', name=device_name).id
        for device_name in payload.get('devices', '')
    }
    for pool_name in payload.get('pools', ''):
        targets |= {d.id for d in fetch('Pool', name=pool_name).devices}
    if handle_asynchronously:
        scheduler.add_job(
            id=str(datetime.now()),
            func=scheduler_job,
            run_date=datetime.now(),
            args=[job.id],
            trigger='date',
        )
        return job.serialized
    else:
        return job.try_run(targets=targets)[0]
def schedule(self):
    default, trigger = self.kwargs()
    scheduler.add_job(**{**default, **trigger})
def schedule(self) -> None:
    default, trigger = self.kwargs()
    if not scheduler.get_job(self.aps_job_id):
        # Not yet scheduled: create the APScheduler job.
        scheduler.add_job(**{**default, **trigger})
    else:
        # Already scheduled: update the existing job's trigger instead.
        scheduler.reschedule_job(default.pop("id"), **trigger)
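# Sketch of the (default, trigger) pair that kwargs() is assumed to return,
# inferred from how schedule() unpacks it: "default" carries the scheduler job
# identity and callable (it must contain an "id", which reschedule_job pops),
# "trigger" carries the trigger-specific arguments. Field names other than
# "id", and the use of start_date, are assumptions for illustration only.
def kwargs(self):
    default = {
        "id": self.aps_job_id,   # matched by get_job / reschedule_job above
        "func": scheduler_job,   # function the scheduler will execute
        "args": [self.id],       # arguments passed to scheduler_job
    }
    trigger = {"trigger": "date", "run_date": self.start_date}  # assumed attribute
    return default, trigger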