def analysis(request):

    """
    Analysis home: list all AnalysisJobs with their lineage.
    """

    # all jobs of type AnalysisJob
    analysis_jobs = Job.objects.filter(job_type='AnalysisJob')

    # organization / record group hierarchy that anchors analysis jobs
    analysis_hierarchy = AnalysisJob.get_analysis_hierarchy()

    # lineage for all jobs, scoped to the analysis hierarchy and
    # explicitly including analysis-type jobs
    analysis_job_lineage = Job.get_all_jobs_lineage(
        organization=analysis_hierarchy['organization'],
        record_group=analysis_hierarchy['record_group'],
        exclude_analysis_jobs=False)

    # refresh each job's status before rendering
    for analysis_job in analysis_jobs:
        analysis_job.update_status()

    # assemble template context and render page
    context = {
        'jobs': analysis_jobs,
        'job_lineage_json': json.dumps(analysis_job_lineage),
        'for_analysis': True,
        'breadcrumbs': breadcrumb_parser(request)
    }
    return render(request, 'core/analysis.html', context)
def all_jobs(request):

    """
    View to show all jobs, across all Organizations, RecordGroups,
    and Job types.

    GET Args:
        include_analysis: if "true" (or "1"/"yes"), include Analysis type jobs

    Returns:
        rendered 'core/all_jobs.html' page
    """

    # get all record groups, excluding those reserved for analysis
    record_groups = RecordGroup.objects.exclude(for_analysis=True)

    # capture include_analysis GET param if present
    # NOTE: GET params arrive as strings, so the previous truthiness check
    # treated ANY non-empty value (including "false") as true; parse explicitly
    include_analysis = request.GET.get(
        'include_analysis', 'false').lower() in ('true', '1', 'yes')

    # get all jobs, optionally excluding AnalysisJobs
    if include_analysis:
        jobs = Job.objects.all()
    else:
        jobs = Job.objects.exclude(job_type='AnalysisJob')

    # get job lineage for all jobs, honoring the same analysis filter
    job_lineage = Job.get_all_jobs_lineage(
        exclude_analysis_jobs=not include_analysis)

    # loop through jobs and update status
    for job in jobs:
        job.update_status()

    # render page
    return render(
        request, 'core/all_jobs.html', {
            'jobs': jobs,
            'record_groups': record_groups,
            'job_lineage_json': json.dumps(job_lineage),
            'breadcrumbs': breadcrumb_parser(request)
        })
def job_analysis(request):

    """
    Run new analysis job: render form on GET, submit job on POST.
    """

    # GET: prepare and render the analysis form
    if request.method == 'GET':

        # capture optional scoping params
        analysis_type = request.GET.get('type', None)
        subset = request.GET.get('subset', None)

        # scope input jobs: published jobs when requested, else all jobs
        published = None
        input_jobs = Job.objects.all()
        if analysis_type == 'published':
            # load PublishedRecords and use its jobs as input
            published = PublishedRecords(subset=subset)
            input_jobs = published.published_jobs

        # supporting querysets for form widgets
        validation_scenarios = ValidationScenario.objects.all()
        field_mappers = FieldMapper.objects.all()
        rits = RecordIdentifierTransformation.objects.all()
        bulk_downloads = DPLABulkDataDownload.objects.all()

        # job lineage restricted to the input jobs scope
        job_lineage = Job.get_all_jobs_lineage(jobs_query_set=input_jobs)

        # assemble context and render page
        context = {
            'job_select_type': 'multiple',
            'input_jobs': input_jobs,
            'published': published,
            'validation_scenarios': validation_scenarios,
            'rits': rits,
            'field_mappers': field_mappers,
            'xml2kvp_handle': xml2kvp.XML2kvp(),
            'analysis_type': analysis_type,
            'bulk_downloads': bulk_downloads,
            'job_lineage_json': json.dumps(job_lineage)
        }
        return render(request, 'core/job_analysis.html', context)

    # POST: initialize and start the analysis job
    if request.method == 'POST':

        cjob = CombineJob.init_combine_job(
            user=request.user,
            # TODO: record_group=record_group,
            job_type_class=AnalysisJob,
            job_params=request.POST)

        # start job; a False return means startup failed
        job_status = cjob.start_job()

        # if startup failed, persist failed status on the job
        if job_status is False:
            cjob.job.status = 'failed'
            cjob.job.save()

        return redirect('analysis')
def job_merge(request, org_id, record_group_id):

    """
    Merge multiple jobs into a single job.

    Args:
        request: Django request object; GET renders form, POST submits job
        org_id: Organization id, used for the redirect after POST
        record_group_id: RecordGroup primary key the merge belongs to

    Returns:
        GET: rendered 'core/job_merge.html' form
        POST: redirect to the record group page
    """

    # retrieve record group
    record_group = RecordGroup.objects.get(pk=record_group_id)

    # if GET, prepare form
    if request.method == 'GET':

        # get scope of input jobs and retrieve
        input_job_scope = request.GET.get('scope', None)

        # if all jobs, retrieve all non-analysis jobs
        # (redundant .all() after .exclude() removed)
        if input_job_scope == 'all_jobs':
            input_jobs = Job.objects.exclude(job_type='AnalysisJob')

        # else, limit to this RecordGroup
        else:
            input_jobs = record_group.job_set.all()

        # get validation scenarios
        validation_scenarios = ValidationScenario.objects.all()

        # get record identifier transformation scenarios
        rits = RecordIdentifierTransformation.objects.all()

        # get field mappers
        field_mappers = FieldMapper.objects.all()

        # get job lineage for all jobs (filtered to input jobs scope)
        job_lineage = Job.get_all_jobs_lineage(jobs_query_set=input_jobs)

        # get all bulk downloads
        bulk_downloads = DPLABulkDataDownload.objects.all()

        # render page
        return render(request, 'core/job_merge.html', {
            'job_select_type': 'multiple',
            'record_group': record_group,
            'input_jobs': input_jobs,
            'input_job_scope': input_job_scope,
            'validation_scenarios': validation_scenarios,
            'rits': rits,
            'field_mappers': field_mappers,
            'xml2kvp_handle': xml2kvp.XML2kvp(),
            'job_lineage_json': json.dumps(job_lineage),
            'bulk_downloads': bulk_downloads,
            'breadcrumbs': breadcrumb_parser(request)
        })

    # if POST, submit job
    if request.method == 'POST':

        cjob = CombineJob.init_combine_job(
            user=request.user,
            record_group=record_group,
            job_type_class=MergeJob,
            job_params=request.POST)

        # start job; a False return means startup failed
        job_status = cjob.start_job()

        # if job_status is False, report job status as failed
        # (was `== False`; identity comparison matches sibling job_analysis
        # and PEP 8 guidance on singleton comparison)
        if job_status is False:
            cjob.job.status = 'failed'
            cjob.job.save()

        return redirect(
            'record_group', org_id=org_id, record_group_id=record_group.id)