def update_expected_scans(study, site_id, config, skip_delete=False, delete_all=False):
    """Update number and type of expected scans for a site.

    Args:
        study (:obj:`dashboard.dashboard.models.Study`): A study from the
            database.
        site_id (:obj:`str`): The name of a site configured for the study.
        config (:obj:`datman.config.config`): A config instance for the study.
        skip_delete (bool, optional): Don't prompt the user and skip deletion
            of any records no longer defined in the config files.
        delete_all (bool, optional): Don't prompt the user and delete any
            records no longer defined in the config files.
    """
    try:
        tag_settings = config.get_tags(site_id)
    except UndefinedSetting:
        # A site with no tags has nothing to update.
        logger.debug(f"No tags defined for site {site_id}. Skipping update.")
        return

    if site_id in study.scantypes:
        # Collect database records whose scan type no longer appears in the
        # config files so they can (optionally) be deleted.
        undefined = [
            entry for entry in study.scantypes[site_id]
            if entry.scantype_id not in tag_settings
        ]
        if undefined:
            delete_records(
                undefined,
                prompt="Expected scan type {} not defined in config files.",
                skip_delete=skip_delete,
                delete_all=delete_all)

    for tag in tag_settings:
        # 'Count' / 'PhaCount' may be unset for a tag; tag_settings.get
        # raises KeyError in that case, so treat missing as None.
        try:
            sub = tag_settings.get(tag, "Count")
        except KeyError:
            sub = None
        try:
            pha = tag_settings.get(tag, "PhaCount")
        except KeyError:
            pha = None
        try:
            study.update_scantype(
                site_id, tag, num=sub, pha_num=pha, create=True)
        except Exception as e:
            # Best effort: log and carry on so one bad tag doesn't block
            # updates for the rest of the site.
            logger.error(f"Failed to update expected scans for {study.id} "
                         f"site {site_id} and tag {tag}. Reason - {e}.")
def generate_qc_report(report_name, subject, expected_files, header_diffs, config):
    """Write a QC report for a subject, then register it with the dashboard.

    Args:
        report_name (:obj:`str`): Path of the report file to create.
        subject: The scan/subject object the report describes.
        expected_files: The expected-files table for this subject.
        header_diffs: Header differences to include in the report body.
        config (:obj:`datman.config.config`): Config for the study.
    """
    tag_settings = config.get_tags(site=subject.site)
    # The original wrapped this in `try: ... except: raise`, which is a
    # no-op (a bare re-raise); removed as dead code. Exceptions still
    # propagate to the caller, and `with` closes the file either way.
    with open(report_name, 'wb') as report:
        write_report_header(report, subject.full_id)
        write_table(report, expected_files, subject)
        write_tech_notes_link(report, subject.site, config.study_name,
                              subject.resource_path)
        write_report_body(report, expected_files, subject, header_diffs,
                          tag_settings)
    update_dashboard(subject, report_name)
def generate_qc_report(report_name, subject, expected_files, header_diffs, config):
    """Build the QC report file for a subject and push it to the dashboard.

    Args:
        report_name (:obj:`str`): Path of the report file to create.
        subject: The scan/subject object the report describes.
        expected_files: The expected-files table for this subject.
        header_diffs: Header differences to include in the report body.
        config (:obj:`datman.config.config`): Config for the study.
    """
    site_tags = config.get_tags(site=subject.site)

    with open(report_name, 'w') as report:
        # Sections are written in display order: header, file table,
        # tech notes link, then the report body.
        write_report_header(report, subject.full_id)
        write_table(report, expected_files, subject)
        write_tech_notes_link(
            report, subject.site, config.study_name, subject.resource_path)
        write_report_body(
            report, expected_files, subject, header_diffs, site_tags)

    update_dashboard(subject, report_name, header_diffs)
def generate_qc_report(report_name, subject, expected_files, header_diffs, config):
    """Write a QC report file for a subject.

    Args:
        report_name (:obj:`str`): Path of the report file to create.
        subject: The scan/subject object the report describes.
        expected_files: The expected-files table for this subject.
        header_diffs: Header differences to include in the report body.
        config (:obj:`datman.config.config`): Config for the study.
    """
    # NOTE(review): unlike the other generate_qc_report variants, this one
    # calls get_tags() without site=subject.site — confirm whether that is
    # intentional before unifying.
    tag_settings = config.get_tags()
    # The original wrapped this in `try: ... except: raise`, which is a
    # no-op (a bare re-raise); removed as dead code. Exceptions still
    # propagate to the caller, and `with` closes the file either way.
    with open(report_name, 'wb') as report:
        write_report_header(report, subject.full_id)
        write_table(report, expected_files, subject)
        write_tech_notes_link(report, subject.site, config.study_name,
                              subject.resource_path)
        write_report_body(report, expected_files, subject, header_diffs,
                          tag_settings)
def find_expected_files(subject, config):
    """Compare the site's export info against the contents of the nii folder.

    Reads the export_info from the config for this site, tabulates every
    nifti that matches a configured tag (in acquisition order), and adds a
    placeholder row for each expected-but-missing series.

    Returns:
        :obj:`pandas.DataFrame`: One row per found or missing series, with
        columns tag, File, bookmark, Note and Sequence, sorted by Sequence.
    """
    export_info = config.get_tags(subject.site)
    tag_counts, expected_positions = initialize_counts(export_info)

    expected_files = pd.DataFrame(
        columns=['tag', 'File', 'bookmark', 'Note', 'Sequence'])
    row = 0

    # Tabulate found data in the order it was acquired.
    for nifti in sorted(subject.niftis, key=lambda item: item.series_num):
        tag = nifti.tag
        if tag not in export_info:
            # Only data defined in the config file is checked.
            continue
        expected_count = export_info.get(tag, 'Count')
        tag_counts[tag] += 1
        bookmark = tag + str(tag_counts[tag])
        note = 'Repeated Scan' if tag_counts[tag] > expected_count else ''
        sequence = get_position(expected_positions[tag])
        expected_files.loc[row] = [tag, nifti, bookmark, note, sequence]
        row += 1

    # Note any missing data.
    for tag in export_info:
        shortfall = export_info.get(tag, 'Count') - tag_counts[tag]
        if shortfall > 0:
            note = 'missing({})'.format(shortfall)
            expected_files.loc[row] = [tag, '', '', note,
                                       expected_positions[tag]]
            row += 1

    return expected_files.sort_values('Sequence')
def find_expected_files(subject, config):
    """Compare the site's export info against the contents of the nii folder.

    Reads the export_info from the config for this site, records each nifti
    whose tag is configured (in acquisition order), then appends a
    placeholder row for every expected series that was not found.

    Returns:
        :obj:`pandas.DataFrame`: One row per found or missing series, with
        columns tag, File, bookmark, Note and Sequence, sorted by Sequence.
    """
    export_info = config.get_tags(subject.site)
    tag_counts, expected_positions = initialize_counts(export_info)

    rows = []

    # Found data, in the order it was acquired.
    for nifti in sorted(subject.niftis, key=lambda item: item.series_num):
        tag = nifti.tag
        if tag not in export_info:
            # Only tags defined in the config file are checked.
            continue
        expected_count = export_info.get(tag, 'Count')
        tag_counts[tag] += 1
        note = 'Repeated Scan' if tag_counts[tag] > expected_count else ''
        rows.append([tag, nifti, tag + str(tag_counts[tag]), note,
                     get_position(expected_positions[tag])])

    # Placeholder rows for any missing data.
    for tag in export_info:
        missing = export_info.get(tag, 'Count') - tag_counts[tag]
        if missing > 0:
            rows.append([tag, '', '', 'missing({})'.format(missing),
                         expected_positions[tag]])

    frame = pd.DataFrame(
        rows, columns=['tag', 'File', 'bookmark', 'Note', 'Sequence'])
    return frame.sort_values('Sequence')
def run_header_qc(subject, standard_dir, log_file, config):
    """Run qc-headers on each dicom against its site/tag gold standard.

    For each .dcm file found in 'dicoms', find the matching site / tag file
    in 'standards', and run qc-headers (from qcmon) on these files. Any
    differences are written to log_file.
    """
    if not subject.dicoms:
        logger.debug("No dicoms found in {}".format(subject.dcm_path))
        return

    standards_dict = get_standards(standard_dir, subject.site)
    tag_settings = config.get_tags(site=subject.site)

    for dicom in subject.dicoms:
        if dicom.tag not in standards_dict:
            logger.debug('No standard with tag {} found in {}'.format(
                dicom.tag, standard_dir))
            continue
        standard = standards_dict[dicom.tag]
        # Run the header check for this dicom; dti scans get qc-headers'
        # --dti flag.
        if tag_settings.get(dicom.tag, "qc_type") == 'dti':
            datman.utils.run('qc-headers {} {} {} --dti'.format(
                dicom.path, standard.path, log_file))
            logger.debug('doing dti {}'.format(dicom.tag))
        else:
            logger.debug('doing other scantype {}'.format(dicom.tag))
            datman.utils.run('qc-headers {} {} {}'.format(
                dicom.path, standard.path, log_file))

    if not os.path.exists(log_file):
        logger.error("header-diff.log not generated for {}. Check that gold "
                     "standards are present for this site.".format(
                         subject.full_id))
def qc_phantom(subject, config):
    """Run the matching phantom QC pipeline for each of a subject's niftis.

    subject: The Scan object for the subject_id of this run
    config : The settings obtained from project_settings.yml

    Phantom pipeline setup:
        Each pipeline has it's own dictionary entry in gather_input_reqs
        within input_spec. Config 'qc_pha' keys in ExportSettings indicate
        which pipeline to use; 'qc_pha' set to 'default' will refer to the
        qc_type. So qc_pha is really used to indicate custom pipelines
        that are non-standard.
    """
    export_tags = config.get_tags(site=subject.site)
    logger.debug('qc {}'.format(subject))

    for nifti in subject.niftis:
        pipeline_tag = get_pha_qc_type(export_tags, nifti.tag)
        # Gather pipeline input requirements; run only when a pipeline
        # exists for this tag.
        requirements = gather_input_req(nifti, pipeline_tag)
        if requirements:
            run_phantom_pipeline(nifti, subject.qc_path, requirements)
def qc_phantom(subject, config):
    """Dispatch each of a subject's niftis to its phantom QC pipeline.

    subject: The Scan object for the subject_id of this run
    config : The settings obtained from project_settings.yml

    Phantom pipeline setup:
        Each pipeline has it's own dictionary entry in gather_input_reqs
        within input_spec. Config 'QcPha' keys in ExportSettings indicate
        which pipeline to use. 'QcPha' set to 'default' will refer to the
        QcType. So QcPha is used to indicate custom pipelines that are
        non-standard.
    """
    export_tags = config.get_tags(site=subject.site)
    logger.debug('qc {}'.format(subject))

    for nii in subject.niftis:
        pha_type = get_pha_qc_type(export_tags, nii.tag)
        inputs = gather_input_req(nii, pha_type)
        # A falsy requirement set means no pipeline is defined for this tag.
        if inputs:
            run_phantom_pipeline(nii, subject.qc_path, inputs)
def update_header_diffs(scan):
    """Refresh the stored header diffs for a single scan.

    Reads the scan's site settings (header field tolerances and ignored
    fields) from the study config and pushes updated diffs onto the scan
    record.

    Args:
        scan: A dashboard scan record with a session/timepoint and a tag.
    """
    site = scan.session.timepoint.site_id
    config = datman.config.config(study=scan.get_study().id)

    # Narrowed from `except Exception` to the specific setting-missing
    # error, matching run_header_qc's handling elsewhere in this file, so
    # real failures (typos, bad config objects) aren't silently swallowed.
    try:
        tolerance = config.get_key("HeaderFieldTolerance", site=site)
    except datman.config.UndefinedSetting:
        tolerance = {}

    try:
        ignore = config.get_key("IgnoreHeaderFields", site=site)
    except datman.config.UndefinedSetting:
        ignore = []

    tags = config.get_tags(site=site)
    try:
        qc_type = tags.get(scan.tag, "qc_type")
    except KeyError:
        # No qc_type configured for this tag; skip the bval comparison.
        check_bvals = False
    else:
        check_bvals = qc_type == 'dti'

    scan.update_header_diffs(ignore=ignore, tolerance=tolerance,
                             bvals=check_bvals)
def run_header_qc(subject, config):
    """
    For each nifti, finds its json file + compares it to the matching gold
    standard. Differences are returned in a dictionary with one entry per
    scan.
    """
    # Site-specific settings: fields to skip entirely and per-field
    # tolerances for near-match values. Both default to empty when unset.
    try:
        ignored_headers = config.get_key('IgnoreHeaderFields',
                                         site=subject.site)
    except datman.config.UndefinedSetting:
        ignored_headers = []
    try:
        header_tolerances = config.get_key('HeaderFieldTolerance',
                                           site=subject.site)
    except datman.config.UndefinedSetting:
        header_tolerances = {}

    tag_settings = config.get_tags(site=subject.site)
    header_diffs = {}

    if datman.dashboard.dash_found:
        # Dashboard path: compare against the database's gold standards and
        # persist the diffs on each scan record via update_header_diffs.
        db_timepoint = datman.dashboard.get_subject(subject._ident)
        if not db_timepoint:
            # NOTE(review): returns None here rather than an empty dict —
            # callers appear to need to handle both; confirm.
            logger.error("Can't find {} in dashboard database".format(subject))
            return
        for sess_num in db_timepoint.sessions:
            db_session = db_timepoint.sessions[sess_num]
            for series in db_session.scans:
                if not series.active_gold_standard:
                    header_diffs[series.name] = {
                        'error': 'Gold standard not '
                        'found'
                    }
                    continue
                if not series.json_contents:
                    logger.debug("No JSON found for {}".format(series))
                    header_diffs[series.name] = {'error': 'JSON not found'}
                    continue
                check_bvals = needs_bval_check(tag_settings, series)
                db_diffs = series.update_header_diffs(
                    ignore=ignored_headers,
                    tolerance=header_tolerances,
                    bvals=check_bvals)
                header_diffs[series.name] = db_diffs.diffs
        return header_diffs

    # Filesystem path (no dashboard): compare each nifti's sidecar JSON to
    # the gold standard JSON found in the study's 'std' directory.
    standard_dir = config.get_path('std')
    standards_dict = get_standards(standard_dir, subject.site)
    for series in subject.niftis:
        scan_name = get_scan_name(series)
        try:
            standard_json = standards_dict[series.tag]
        except KeyError:
            logger.debug('No standard with tag {} found in {}'.format(
                series.tag, standard_dir))
            header_diffs[scan_name] = {'error': 'Gold standard not found'}
            continue
        try:
            series_json = find_json(series)
        except IOError:
            logger.debug('No JSON found for {}'.format(series))
            header_diffs[scan_name] = {'error': 'JSON not found'}
            continue
        check_bvals = needs_bval_check(tag_settings, series)
        diffs = header_checks.construct_diffs(
            series_json,
            standard_json,
            ignored_fields=ignored_headers,
            tolerances=header_tolerances,
            dti=check_bvals)
        header_diffs[scan_name] = diffs
    return header_diffs