def check_job_executor(data, job_id):
    job = api.get_job(job_id)

    # Make sure we have the right schema for the job type
    # We can identify the Java action schema by looking for 'main_class'
    if ('main_class' in data) ^ (job.type == 'Java'):
        raise ex.InvalidException("Schema is not valid for job type %s"
                                  % job.type)

    if 'input_id' in data:
        b.check_data_source_exists(data['input_id'])
        b.check_data_source_exists(data['output_id'])

    main_base.check_cluster_exists(data['cluster_id'])
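# A minimal, self-contained sketch of the XOR guard's truth table above
# (pure Python, no project imports; 'Pig' stands in for any non-Java type).
# Note that 'output_id' is read unguarded whenever 'input_id' is present;
# the revised validator below requires both keys explicitly.
for has_main_class, is_java, rejected in [
    (True, True, False),    # Java job with 'main_class'     -> accepted
    (False, True, True),    # Java job without 'main_class'  -> rejected
    (True, False, True),    # e.g. Pig job with 'main_class' -> rejected
    (False, False, False),  # e.g. Pig job without it        -> accepted
]:
    assert (has_main_class ^ is_java) == rejected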
def check_job_executor(data, job_id):
    job = api.get_job(job_id)
    job_type, subtype = edp.split_job_type(job.type)

    # All types except Java require input and output objects
    if job_type == 'Java':
        if not _is_main_class_present(data):
            raise ex.InvalidDataException('Java job must '
                                          'specify edp.java.main_class')
    else:
        if not ('input_id' in data and 'output_id' in data):
            raise ex.InvalidDataException("%s job requires 'input_id' "
                                          "and 'output_id'" % job.type)

        b.check_data_source_exists(data['input_id'])
        b.check_data_source_exists(data['output_id'])

        if (job_type == 'MapReduce' and subtype == 'Streaming'
                and not _streaming_present(data)):
            raise ex.InvalidDataException("%s job must specify streaming "
                                          "mapper and reducer" % job.type)

    main_base.check_cluster_exists(data['cluster_id'])
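# For context, a minimal sketch of the two helper predicates called above.
# edp.split_job_type() is assumed to split a dotted type such as
# 'MapReduce.Streaming' into ('MapReduce', 'Streaming'). Only the
# 'edp.java.main_class' key is confirmed by the error message above; the
# 'job_configs' layout and the streaming config keys are assumptions.

def _is_main_class_present(data):
    # Assumed layout: {'job_configs': {'configs': {...}}}
    configs = data.get('job_configs', {}).get('configs', {})
    return bool(configs.get('edp.java.main_class'))


def _streaming_present(data):
    # Assumed keys, named here to mirror 'edp.java.main_class'
    configs = data.get('job_configs', {}).get('configs', {})
    return ('edp.streaming.mapper' in configs and
            'edp.streaming.reducer' in configs)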
def job_get(job_id):
    return u.render(api.get_job(job_id).to_wrapped_dict())
def job_get(job_id):
    return u.render(jobs=api.get_job(job_id))
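# Sketch of why the two job_get variants render differently, based only on
# the call sites above: to_wrapped_dict() is assumed to nest the resource
# dict under its singular name, yielding {"job": {...}}, while the second
# variant hands u.render() the raw object under a plural "jobs" key. The
# class and 'resource_name' attribute below are hypothetical, for
# illustration only.
class _ResourceSketch(object):
    resource_name = 'job'  # hypothetical attribute

    def to_dict(self):
        return {'id': 'example-id', 'type': 'Java'}  # illustrative payload

    def to_wrapped_dict(self):
        # Assumed behavior: {'job': {'id': ..., 'type': ...}}
        return {self.resource_name: self.to_dict()}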