def _update_analysis_reports_and_datastore(tnode_, task_):
    """Register each task output file in the datastore and refresh reports.

    For every output of ``task_`` this adds a ``DataStoreFile`` to the
    global datastore ``ds``, persists the datastore JSON, notifies the
    services layer, and regenerates the datastore HTML report. Outputs of
    type ``FileTypes.REPORT`` additionally get a per-task report written
    and the analysis file links updated.

    :param tnode_: task node; its ``meta_task`` supplies per-output
        display names, descriptions, types and an optional source id
    :param task_: task instance providing ``task_id``, ``output_files``
        and ``output_dir``
    """
    # The meta-task metadata lists must line up one-to-one with the
    # concrete output files produced by the task.
    assert (len(tnode_.meta_task.output_file_display_names) ==
            len(tnode_.meta_task.output_file_descriptions) ==
            len(tnode_.meta_task.output_types) ==
            len(task_.output_files))

    # Loop-invariant: chunked-ness depends only on the task node, so
    # compute it once instead of once per output file.
    is_chunked_ = _is_chunked_task_node_type(tnode_)

    for i_file, (file_type_, path_, name, description) in enumerate(zip(
            tnode_.meta_task.output_types,
            task_.output_files,
            tnode_.meta_task.output_file_display_names,
            tnode_.meta_task.output_file_descriptions)):
        # Default source id is positional; an explicit datastore_source_id
        # on the meta task overrides it. NOTE(review): the override assigns
        # the same source id to every output file — confirm that is intended.
        source_id = "{t}-out-{i}".format(t=task_.task_id, i=i_file)
        if tnode_.meta_task.datastore_source_id is not None:
            source_id = tnode_.meta_task.datastore_source_id

        ds_uuid = _get_or_create_uuid_from_file(path_, file_type_)
        ds_file_ = DataStoreFile(ds_uuid, source_id, file_type_.file_type_id,
                                 path_, is_chunked=is_chunked_, name=name,
                                 description=description)
        ds.add(ds_file_)
        ds.write_update_json(job_resources.datastore_json)

        # Update Services
        services_add_datastore_file(ds_file_)

        # Regenerate the datastore HTML report after each file is added.
        dsr = DU.datastore_to_report(ds)
        R.write_report_to_html(dsr, os.path.join(job_resources.html,
                                                 'datastore.html'))

        if file_type_ == FileTypes.REPORT:
            T.write_task_report(job_resources, task_.task_id, path_,
                                DU._get_images_in_dir(task_.output_dir))
            update_analysis_file_links(tnode_.idx, path_)
def _update_analysis_reports_and_datastore(tnode_, task_):
    """Add every task output to the datastore and refresh the reports.

    Each (output type, output path) pair from the task is wrapped in a
    ``DataStoreFile``, recorded in the global datastore ``ds``, pushed to
    the services layer, and reflected in the regenerated datastore HTML
    report. ``FileTypes.REPORT`` outputs additionally get a per-task
    report written and the analysis file links updated.
    """
    outputs = zip(tnode_.meta_task.output_types, task_.output_files)
    for out_type, out_path in outputs:
        # Source id combines the task id with the file-type id.
        sid = "{t}-{f}".format(t=task_.task_id, f=out_type.file_type_id)
        file_uuid = _get_dataset_uuid_or_create_uuid(out_path)
        datastore_file = DataStoreFile(file_uuid, sid,
                                       out_type.file_type_id, out_path)

        # Record in the datastore and persist the updated JSON.
        ds.add(datastore_file)
        ds.write_update_json(job_resources.datastore_json)

        # Update Services
        services_add_datastore_file(datastore_file)

        # Regenerate the datastore HTML report.
        html_path = os.path.join(job_resources.html, 'datastore.html')
        R.write_report_to_html(DU.datastore_to_report(ds), html_path)

        if out_type == FileTypes.REPORT:
            images = DU._get_images_in_dir(task_.output_dir)
            T.write_task_report(job_resources, task_.task_id, out_path,
                                images)
            update_analysis_file_links(tnode_.idx, out_path)