def get(self, request, job_id):
    """Approve the job *job_id* on behalf of the custodian, then redirect.

    The expected current state of the job is taken from the 'job_state'
    query parameter (raises KeyError if missing).
    """
    current_state = request.GET['job_state']
    JobStatemachine.send_user_action(
        job_id, current_state, JobStateOutcome.approved_by_custodian)
    return redirect("/harvest/runningjob")
def get(self, request, job_id):
    """Handle GET: record custodian approval for *job_id* and return to the list.

    'job_state' must be present in the query string; a missing key raises
    KeyError, as in the original.
    """
    JobStatemachine.send_user_action(
        job_id,
        request.GET['job_state'],
        JobStateOutcome.approved_by_custodian,
    )
    return redirect("/harvest/runningjob")
def get(self, request, job_id):
    """Cancel the job *job_id* on behalf of the custodian, then redirect.

    The expected current state of the job is taken from the 'job_state'
    query parameter (raises KeyError if missing).
    """
    # Fix: removed leftover commented-out ipdb debugger invocation.
    job_state = request.GET['job_state']
    JobStatemachine.send_user_action(
        job_id, job_state, JobStateOutcome.cancelled_by_custodian)
    return redirect("/harvest/runningjob")
def get(self, request, job_id):
    """Handle GET: record custodian cancellation for *job_id* and redirect.

    'job_state' must be present in the query string; a missing key raises
    KeyError.
    """
    # Fix: removed leftover commented-out ipdb debugger invocation.
    job_state = request.GET['job_state']
    JobStatemachine.send_user_action(
        job_id, job_state, JobStateOutcome.cancelled_by_custodian)
    return redirect("/harvest/runningjob")
def create(self):
    """Create a triggered job for every publish named in ``self.data['publishes']``.

    Returns a dict with a top-level "status" (True only if every job was
    created) plus one sub-dict per publish name carrying "status",
    "job_id" (on success) and "message".
    """
    job_batch_id = JobInterval.Triggered.job_batch_id()
    resp = {"status": True}
    for name in self.data.get('publishes') or []:
        resp[name] = {}
        try:
            result = JobStatemachine.create_job_by_name(
                name, JobInterval.Triggered, job_batch_id)
            if result[0]:
                resp[name]["status"] = True
                resp[name]["job_id"] = result[1]
                resp[name]["message"] = "Succeed"
            else:
                resp["status"] = False
                resp[name]["status"] = False
                resp[name]["message"] = result[1]
        # Fix: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            msg = traceback.format_exc()
            logger.error(msg)
            resp["status"] = False
            resp[name]["status"] = False
            resp[name]["message"] = msg
    return resp
def _publish(self, application, name, user):
    """Publish (or retire) the mudmap layer for *application*/*name*.

    If geojson files exist under MUDMAP_HOME/<application>/<name>, an Input
    and a Publish named "<application>_<name>" are created/updated and a
    triggered job is submitted; returns the new job id. If no files exist,
    the Input (and its dependents) is deleted and None is returned.
    NOTE(review): *user* is not referenced in this body — confirm callers
    rely on the parameter only for interface compatibility.
    """
    # Collect the geojson files that make up this mudmap, if any.
    folder = os.path.join(BorgConfiguration.MUDMAP_HOME, application, name)
    if os.path.exists(folder):
        json_files = [os.path.join(folder, f) for f in os.listdir(folder) if f[-5:] == ".json"]
    else:
        json_files = None
    # Generate the source data.
    data_source = DataSource.objects.get(name="mudmap")
    input_name = "{}_{}".format(application, name)
    mudmap_input = None
    if json_files:
        # Create or update the Input for this mudmap.
        try:
            mudmap_input = Input.objects.get(name=input_name)
        except Input.DoesNotExist:
            mudmap_input = Input(name=input_name, data_source=data_source, generate_rowid=False)
        # Render the data source's VRT template against the file list.
        source = Template(data_source.vrt).render(Context({"files": json_files, "self": mudmap_input}))
        mudmap_input.source = source
        mudmap_input.full_clean(exclude=["data_source"])
        if mudmap_input.pk:
            # Existing row: bump the modification time and save only the
            # fields this method may have changed.
            mudmap_input.last_modify_time = timezone.now()
            mudmap_input.save(update_fields=["source", "last_modify_time", "info"])
        else:
            mudmap_input.save()
        # Get or create the matching Publish.
        mudmap_publish = None
        try:
            mudmap_publish = Publish.objects.get(name=input_name)
        except Publish.DoesNotExist:
            # Does not exist yet — create it in the mudmap workspace.
            workspace = Workspace.objects.get(name="mudmap")
            mudmap_publish = Publish(
                name=input_name,
                workspace=workspace,
                interval=JobInterval.Manually,
                status=ResourceStatus.Enabled,
                input_table=mudmap_input, sql="$$".join(Publish.TRANSFORM).strip()
            )
            mudmap_publish.full_clean(exclude=["interval"])
            mudmap_publish.save()
        # Publish the job; result is (ok, job_id_or_message).
        result = JobStatemachine._create_job(mudmap_publish, JobInterval.Triggered)
        if result[0]:
            return result[1]
        else:
            raise Exception(result[1])
    else:
        # No more json files: delete the input and all dependent objects.
        try:
            mudmap_input = Input.objects.get(name=input_name)
            mudmap_input.delete()
            return None
        except Input.DoesNotExist:
            # Already deleted; fall through (implicitly returns None).
            pass
def create_harvest_job(self, request, queryset):
    """Admin action: create a manual harvest job for each selected publish.

    Jobs share one manual batch id. Failures are collected and reported
    with a warning message; otherwise a success message is shown.
    """
    job_batch_id = JobInterval.Manually.job_batch_id()
    failed_objects = []
    for publish in queryset:
        result = JobStatemachine.create_job(publish.id, JobInterval.Manually,
                                            job_batch_id)
        if not result[0]:
            failed_objects.append(
                ("{0}:{1}".format(publish.workspace.name, publish.name),
                 result[1]))
    # Fix: corrected misspelled "publishs" in the user-facing messages.
    if failed_objects:
        # NOTE(review): names/messages are interpolated into mark_safe HTML
        # without escaping — confirm they cannot carry user-controlled markup.
        messages.warning(
            request,
            mark_safe(
                "Create job failed for some selected publishes:<ul>{0}</ul>"
                .format("".join([
                    "<li>{0} : {1}</li>".format(o[0], o[1])
                    for o in failed_objects
                ]))))
    else:
        messages.success(
            request, "Create job successfully for all selected publishes")
def handle(self, *args, **options):
    """Run harvest jobs according to the command-line options.

    Modes (first match wins): run/act on a single job ('job_id', optionally
    with 'user_action'/'job_state'), run continuously every 'run_interval'
    seconds, only check data sources ('check_datasource'), or run all jobs
    once. Returns 0.
    """
    check = options["check_datasource"]
    check_interval = 0
    if check:
        try:
            check_interval = int(options["check_interval"]) * 60
            if check_interval < 0:
                check_interval = 0
        # Fix: was a bare 'except:'; only conversion failures are expected.
        except (TypeError, ValueError):
            check_interval = 0
    if options['job_id']:
        try:
            options['job_id'] = int(options['job_id'])
        except (TypeError, ValueError):
            raise Exception("job id should be integer")
        if options['user_action']:
            # job_state is optional when sending a user action.
            if options['job_state']:
                JobStatemachine.send_user_action(options['job_id'],
                                                 options['job_state'],
                                                 options['user_action'])
            else:
                JobStatemachine.send_user_action(options['job_id'], None,
                                                 options['user_action'])
        elif options['job_state']:
            raise Exception("missing action parameter")
        else:
            JobStatemachine.run_job(options['job_id'])
    elif options['run_interval']:
        # Fix: the error messages wrongly said "job id" for run_interval.
        try:
            options['run_interval'] = int(options['run_interval'])
        except (TypeError, ValueError):
            raise Exception("run interval should be a positive integer.")
        if options['run_interval'] <= 0:
            raise Exception("run interval should be a positive integer.")
        HarvestModifyTime(check, check_interval, True).harvest()
        JobStatemachine.running(options['run_interval'])
    elif check:
        HarvestModifyTime(check, check_interval).harvest()
    else:
        JobStatemachine.run_all_jobs()
    return 0
def handle(self, *args, **options):
    """Dispatch harvest work based on the parsed command options.

    Exactly one mode runs: single-job ("job_id", with optional
    "user_action"/"job_state"), continuous ("run_interval" seconds),
    datasource check only ("check_datasource"), or a one-shot run of all
    jobs. Returns 0.
    """
    check = options["check_datasource"]
    check_interval = 0
    if check:
        try:
            check_interval = int(options["check_interval"]) * 60
            if check_interval < 0:
                check_interval = 0
        # Fix: was a bare 'except:'; only conversion failures are expected.
        except (TypeError, ValueError):
            check_interval = 0
    if options["job_id"]:
        try:
            options["job_id"] = int(options["job_id"])
        except (TypeError, ValueError):
            raise Exception("job id should be integer")
        if options["user_action"]:
            # job_state is optional when sending a user action.
            if options["job_state"]:
                JobStatemachine.send_user_action(options["job_id"],
                                                 options["job_state"],
                                                 options["user_action"])
            else:
                JobStatemachine.send_user_action(options["job_id"], None,
                                                 options["user_action"])
        elif options["job_state"]:
            raise Exception("missing action parameter")
        else:
            JobStatemachine.run_job(options["job_id"])
    elif options["run_interval"]:
        # Fix: the error messages wrongly said "job id" for run_interval.
        try:
            options["run_interval"] = int(options["run_interval"])
        except (TypeError, ValueError):
            raise Exception("run interval should be a positive integer.")
        if options["run_interval"] <= 0:
            raise Exception("run interval should be a positive integer.")
        HarvestModifyTime(check, check_interval, True).harvest()
        JobStatemachine.running(options["run_interval"])
    elif check:
        HarvestModifyTime(check, check_interval).harvest()
    else:
        JobStatemachine.run_all_jobs()
    return 0
def create(self):
    """Create a triggered job per requested publish name.

    Returns {"status": bool, "message": {name: text}} where "status" is
    True only if every job was created successfully.
    """
    batch_id = Triggered.instance().job_batch_id
    resp = {"status": True, "message": {}}
    for publish_name in self.data.get('publishes') or []:
        result = JobStatemachine.create_job_by_name(
            publish_name, Triggered.instance(), batch_id)
        if result[0]:
            resp["message"][publish_name] = "job id : {0}".format(result[1])
        else:
            resp["status"] = False
            resp["message"][publish_name] = result[1]
    return resp
def create_harvest_job(self, request, queryset):
    """Admin action: queue a manual harvest job for every selected publish.

    All jobs share one manual batch id; failures are accumulated and shown
    in a single warning message, success in a single success message.
    """
    job_batch_id = Manually.instance().job_batch_id
    failed_objects = []
    for publish in queryset:
        result = JobStatemachine.create_job(publish.id, Manually.instance(), job_batch_id)
        if not result[0]:
            failed_objects.append(("{0}:{1}".format(publish.workspace.name, publish.name), result[1]))
    # Fix: corrected misspelled "publishs" in the user-facing messages.
    if failed_objects:
        # NOTE(review): names/messages are interpolated into mark_safe HTML
        # without escaping — confirm they cannot carry user-controlled markup.
        messages.warning(request, mark_safe("Create job failed for some selected publishes:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed_objects]))))
    else:
        messages.success(request, "Create job successfully for all selected publishes")
def create(self):
    """Build a per-name result map while creating one triggered job per publish.

    Top-level "status" stays True only when every creation succeeded;
    "message" maps each publish name to a job-id note or an error text.
    """
    response = {"status": True, "message": {}}
    messages_by_name = response["message"]
    batch = Triggered.instance().job_batch_id
    for name in self.data.get('publishes') or []:
        outcome = JobStatemachine.create_job_by_name(name, Triggered.instance(), batch)
        if not outcome[0]:
            response["status"] = False
            messages_by_name[name] = outcome[1]
        else:
            messages_by_name[name] = "job id : {0}".format(outcome[1])
    return response
def create(self):
    """Create a triggered job for every publish named in ``self.data['publishes']``.

    Returns a dict with a top-level "status" (True only if every job was
    created) plus one sub-dict per publish name carrying "status",
    "job_id" (on success) and "message".
    """
    job_batch_id = JobInterval.Triggered.job_batch_id()
    resp = {"status": True}
    for name in self.data.get('publishes') or []:
        resp[name] = {}
        try:
            result = JobStatemachine.create_job_by_name(name, JobInterval.Triggered, job_batch_id)
            if result[0]:
                resp[name]["status"] = True
                resp[name]["job_id"] = result[1]
                resp[name]["message"] = "Succeed"
            else:
                resp["status"] = False
                resp[name]["status"] = False
                resp[name]["message"] = result[1]
        # Fix: was a bare 'except :', which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            msg = traceback.format_exc()
            logger.error(msg)
            resp["status"] = False
            resp[name]["status"] = False
            resp[name]["message"] = msg
    return resp
def handle(self, *args, **options):
    """Create harvest job(s) from exactly one of three mutually exclusive options.

    'interval_choice' creates jobs for a whole interval, 'publish_id' or
    'publish_name' creates a single manual job. Returns 0; raises on
    conflicting or absent options.
    """
    interval_choice = options['interval_choice']
    publish_id = options['publish_id']
    publish_name = options['publish_name']
    if interval_choice:
        if publish_id or publish_name:
            raise Exception("Three options cannot be used together.")
        JobStatemachine.create_jobs(JobInterval.get_interval(interval_choice))
    elif publish_id:
        if publish_name:
            raise Exception("Three options cannot be used together.")
        JobStatemachine.create_job(publish_id, Manually.instance())
    elif publish_name:
        JobStatemachine.create_job_by_name(publish_name, Manually.instance())
    else:
        raise Exception("No option is specified")
    return 0
def handle(self, *args, **options):
    """Create harvest jobs; the three selection options are mutually exclusive.

    Priority: 'interval_choice' (bulk by interval), then 'publish_id',
    then 'publish_name' (single manual job). Returns 0.
    """
    if options['interval_choice']:
        # Any second option alongside interval_choice is an error.
        if options['publish_id'] or options['publish_name']:
            raise Exception("Three options cannot be used together.")
        JobStatemachine.create_jobs(
            JobInterval.get_interval(options['interval_choice']))
        return 0
    if options['publish_id']:
        if options['publish_name']:
            raise Exception("Three options cannot be used together.")
        JobStatemachine.create_job(options['publish_id'], Manually.instance())
        return 0
    if options['publish_name']:
        JobStatemachine.create_job_by_name(options['publish_name'],
                                           Manually.instance())
        return 0
    raise Exception("No option is specified")
def _publish(self, application, name, user):
    """Publish (or retire) the mudmap layer for *application*/*name*.

    With geojson files present under MUDMAP_HOME/<application>/<name>, the
    Input and Publish named "<application>_<name>" are created/updated and
    a triggered job is submitted; returns the new job id. With no files,
    the Input (and dependents) is deleted and None is returned.
    NOTE(review): *user* is not referenced in this body — confirm callers
    rely on the parameter only for interface compatibility.
    """
    # Collect the geojson files that make up this mudmap, if any.
    folder = os.path.join(BorgConfiguration.MUDMAP_HOME, application, name)
    if os.path.exists(folder):
        json_files = [
            os.path.join(folder, f) for f in os.listdir(folder)
            if f[-5:] == ".json"
        ]
    else:
        json_files = None
    # Generate the source data.
    data_source = DataSource.objects.get(name="mudmap")
    input_name = "{}_{}".format(application, name)
    mudmap_input = None
    if json_files:
        # Create or update the Input for this mudmap.
        try:
            mudmap_input = Input.objects.get(name=input_name)
        except Input.DoesNotExist:
            mudmap_input = Input(name=input_name,
                                 data_source=data_source,
                                 generate_rowid=False)
        # Render the data source's VRT template against the file list.
        source = Template(data_source.vrt).render(
            Context({
                "files": json_files,
                "self": mudmap_input
            }))
        mudmap_input.source = source
        mudmap_input.full_clean(exclude=["data_source"])
        if mudmap_input.pk:
            # Existing row: bump the modification time and save only the
            # fields this method may have changed.
            mudmap_input.last_modify_time = timezone.now()
            mudmap_input.save(
                update_fields=["source", "last_modify_time", "info"])
        else:
            mudmap_input.save()
        # Get or create the matching Publish.
        mudmap_publish = None
        try:
            mudmap_publish = Publish.objects.get(name=input_name)
        except Publish.DoesNotExist:
            # Does not exist yet — create it in the mudmap workspace.
            workspace = Workspace.objects.get(name="mudmap")
            mudmap_publish = Publish(name=input_name,
                                     workspace=workspace,
                                     interval=JobInterval.Manually,
                                     status=ResourceStatus.Enabled,
                                     input_table=mudmap_input,
                                     sql="$$".join(
                                         Publish.TRANSFORM).strip())
            mudmap_publish.full_clean(exclude=["interval"])
            mudmap_publish.save()
        # Publish the job; result is (ok, job_id_or_message).
        result = JobStatemachine._create_job(mudmap_publish,
                                             JobInterval.Triggered)
        if result[0]:
            return result[1]
        else:
            raise Exception(result[1])
    else:
        # No more json files: delete the input and all dependent objects.
        try:
            mudmap_input = Input.objects.get(name=input_name)
            mudmap_input.delete()
            return None
        except Input.DoesNotExist:
            # Already deleted; fall through (implicitly returns None).
            pass
def execute(self, time):
    """Create the batch of jobs for this scheduler's configured interval.

    NOTE(review): RepeatedJob.job_batch_id is passed uncalled — presumably a
    property or an intentionally passed callable; confirm against
    create_jobs' signature.
    """
    interval = self._interval
    return JobStatemachine.create_jobs(interval, RepeatedJob.job_batch_id)
def execute(self, time):
    """Run all pending jobs, flagging whether this is the scheduler's first run."""
    try:
        return JobStatemachine.run_all_jobs(self._first_run)
    finally:
        # Later runs are never "first", even if run_all_jobs raised.
        self._first_run = False
def execute(self, time):
    """Run every pending job; the first-run flag is consumed exactly once."""
    # Capture and clear the flag up front so it is reset regardless of
    # whether run_all_jobs raises (equivalent to a try/finally reset).
    first_run, self._first_run = self._first_run, False
    return JobStatemachine.run_all_jobs(first_run)
def execute(self, time):
    """Submit the repeated-job batch for the interval bound to this instance.

    NOTE(review): RepeatedJob.job_batch_id is handed over without calling
    it — verify create_jobs expects the attribute/callable itself.
    """
    return JobStatemachine.create_jobs(
        self._interval,
        RepeatedJob.job_batch_id,
    )