Exemplo n.º 1
    def check_lesion_tract_overlaps(tracts):
        """Score how much the lesion overlaps each tract in ``tracts``.

        For every tract, fetch (or regenerate and cache) the averaged density
        map for the current query, mask it at ``threshold``, and append the
        percentage of above-threshold tract voxels that intersect the lesion
        to ``intersecting_tracts``.

        Relies on names from the enclosing scope: ``cache_key``,
        ``request_query``, ``lesion_data``, ``threshold`` and
        ``intersecting_tracts``.

        NOTE(review): the early ``return ..., 404`` only exits this helper;
        the caller does not appear to inspect the return value — confirm
        this is the intended error path.
        """
        cache = JobCache(current_app.cache, current_app.cache_lock)

        for tract in tracts:
            # Average density maps for this tract based on the current query;
            # save the averaged map and cache the file path.
            status = cache.job_status(cache_key, tract.code)
            if status == 'COMPLETE':
                # get file path from cache
                tract_file_path = cache.job_result(cache_key, tract.code)
            else:
                # recalculate density map
                file_path_data = dbu.density_map_file_path_data(request_query)
                if len(file_path_data) > 0:
                    current_app.logger.info(f'Adding job {tract.code}')
                    cache.add_job(cache_key, tract.code)
                    data_dir = current_app.config[
                        'DATA_FILE_PATH']  # file path to data folder
                    tract_file_path = du.generate_average_density_map(
                        data_dir, file_path_data, tract, 'MNI')
                    cache.job_complete(cache_key, tract.code, tract_file_path)
                    current_app.logger.info(f'Job {tract.code} complete')
                else:
                    current_app.logger.info(
                        f'No subjects returned for query {json.dumps(request_query, indent=4)}'
                    )
                    return 'No subjects returned for the current query', 404

            # Overlap score: percentage of above-threshold tract voxels that
            # also fall inside the lesion mask.
            tract_data = du.get_nifti_data(tract_file_path)
            masked_tract_data = ma.masked_where(tract_data < threshold,
                                                tract_data)
            overlap = lesion_data * masked_tract_data
            over_threshold_count = masked_tract_data.count()
            over_threshold_overlap_count = len(overlap.nonzero()[0])
            if over_threshold_overlap_count:
                overlap_percent = (over_threshold_overlap_count /
                                   over_threshold_count) * 100.
                # add dict to intersecting_tracts list
                intersecting_tracts.append({
                    "tractName": tract.name,
                    "tractCode": tract.code,
                    "overlapScore": overlap_percent,
                    "description": tract.description
                })
Exemplo n.º 2
    def test_get_nifti_data(self):
        """get_nifti_data should return the loaded image's raw ndarray."""
        fake_path = 'test_file_path.nii.gz'
        fake_nifti = Nifti1Image(
            np.ones(self.nifti_dim, dtype=np.int16), self.test_affine)

        def fake_load(file_path):
            # Stand-in for nibabel.load: only the expected path yields the
            # test image, anything else returns an empty string.
            return fake_nifti if file_path == fake_path else ''

        with monkey_patch(du.nib, 'load', fake_load):
            result = du.get_nifti_data(fake_path)
            assert type(result) is np.ndarray
            assert np.all(result == 1)
            assert result.shape == self.nifti_dim
Exemplo n.º 3
def lesion_tract_disconnect(lesion_code, tract_code):
    """Flask view: per-subject disconnection of one tract by a lesion.

    For each subject matching the dataset query, computes how many
    streamlines of ``tract_code`` pass through the lesion volume, then
    returns summary statistics (mean/std percent disconnect, a 4-bin
    histogram over 0-100%, and average streamline counts) as JSON.

    Returns 400 for an empty query or unknown tract, 500 for an unknown
    lesion code.
    """
    # get the request query
    request_query = jquery_unparam(request.query_string.decode('utf-8'))

    current_app.logger.info(
        f'Running lesion tract disconnect for lesion {lesion_code}, tract {tract_code} and query {json.dumps(request_query, indent=4)}'
    )

    file_path_data = dbu.density_map_file_path_data(request_query)
    if not len(file_path_data):
        current_app.logger.info(
            f'No subjects returned for query {json.dumps(request_query, indent=4)}'
        )
        return 'No subjects in dataset query', 400

    data_dir = current_app.config['DATA_FILE_PATH']

    if lesion_code == 'example':
        lesion_file_name = f'{data_dir}/{du.EXAMPLE_LESION_FILE_NAME}'
    else:
        lesion_upload = LesionUpload.query.get(lesion_code)

        if not lesion_upload:
            # logger.warn is a deprecated alias of logger.warning
            current_app.logger.warning(
                f'Lesion does not exist in database with code {lesion_code}')
            return 'Lesion code does not exist. Please re-upload lesion.', 500

        lesion_file_name = lesion_upload.saved_file_name

    lesion_data = du.get_nifti_data(lesion_file_name)

    # validate tract code
    tract = dbu.get_tract(tract_code)
    if not tract:
        current_app.logger.warning(
            f'Nonexistent tract code {tract_code}, returning 400...')
        return f'The requested tract {tract_code} does not exist', 400

    # per-subject disconnection statistics
    num_streamlines_per_subject = []
    disconnected_streamlines_per_subject = []
    percent_disconnect_per_subject = []
    for subject_id, dataset_dir, method in file_path_data:
        file_path = du.file_path(data_dir, dataset_dir, tract.file_path,
                                 method, subject_id, 'MNI', tract_code, 'trk')
        num_streamlines, disconnected_streamlines, percent_disconnect = lu.calculate_tract_disconnection(
            file_path, lesion_data)
        num_streamlines_per_subject.append(num_streamlines)
        disconnected_streamlines_per_subject.append(disconnected_streamlines)
        percent_disconnect_per_subject.append(percent_disconnect)

    average_disconnect = np.mean(percent_disconnect_per_subject)
    std_disconnect = np.std(percent_disconnect_per_subject)
    # np.histogram returns (counts, bin_edges); only counts are sent back
    histogram = np.histogram(percent_disconnect_per_subject,
                             bins=4,
                             range=(0, 100))

    average_num_streamlines = np.mean(num_streamlines_per_subject)
    average_disconnected_streamlines = np.mean(
        disconnected_streamlines_per_subject)

    response_object = {
        'averageDisconnect': average_disconnect,
        'stdDisconnect': std_disconnect,
        'histogram': histogram[0].tolist(),
        'percentDisconnect': percent_disconnect_per_subject,
        'averageNumStreamlines': average_num_streamlines,
        'averageDisconnectedStreamlines': average_disconnected_streamlines
    }

    return make_response(jsonify(response_object)), 200
Exemplo n.º 4
def lesion_analysis(lesion_code, threshold):
    """Flask view: list tracts whose density maps overlap a lesion.

    Loads the lesion volume, determines which hemispheres it touches via
    the configured hemisphere masks, then scores overlap against the
    right-hemisphere, left-hemisphere and inter-hemispheric tracts that are
    available in every dataset/method of the query. Returns the tracts
    sorted by overlap score (highest first) as JSON.

    ``threshold`` arrives as an integer percentage and is scaled to 0-1.
    Returns 400 for an empty query, 404 for a bad threshold, 500 for an
    unknown lesion code.
    """
    cache_key = cu.construct_cache_key(request.query_string.decode('utf-8'))

    # get the request query
    request_query = jquery_unparam(request.query_string.decode('utf-8'))

    current_app.logger.info(
        f'Running lesion analysis for lesion id {lesion_code}, threshold {threshold} and query {json.dumps(request_query, indent=4)}'
    )

    file_path_data = dbu.density_map_file_path_data(request_query)
    if not len(file_path_data):
        current_app.logger.info(
            f'No subjects in query {json.dumps(request_query, indent=4)}')
        return 'No subjects in dataset query', 400

    try:
        # scale threshold to 0 - 1 since density map is stored in this range
        threshold = int(threshold) * 0.01
    except ValueError:
        current_app.logger.info(
            f'Invalid threshold value {threshold} applied, returning 404...')
        return f'Invalid threshold value {threshold} sent to server.', 404

    data_dir = current_app.config['DATA_FILE_PATH']

    if lesion_code == 'example':
        lesion_data = du.get_nifti_data(
            f'{data_dir}/{du.EXAMPLE_LESION_FILE_NAME}')
    else:
        lesion_upload = LesionUpload.query.get(lesion_code)
        if not lesion_upload:
            # logger.warn is a deprecated alias of logger.warning
            current_app.logger.warning(
                f'Lesion does not exist in database with code {lesion_code}')
            return 'Lesion code does not exist. Please re-upload lesion.', 500
        lesion_data = du.get_nifti_data(lesion_upload.saved_file_name)

    # NOTE(review): Nifti1Image.get_data() is deprecated in recent nibabel
    # (get_fdata() is the replacement but returns float64) — confirm the
    # pinned nibabel version before changing.
    rh = nib.load(current_app.config['RIGHT_HEMISPHERE_MASK']).get_data()
    lh = nib.load(current_app.config['LEFT_HEMISPHERE_MASK']).get_data()

    rh_overlap = lesion_data * rh
    lh_overlap = lesion_data * lh

    intersecting_tracts = []

    def check_lesion_tract_overlaps(tracts):
        """Append an overlap-score dict to intersecting_tracts per tract.

        Uses the job cache to reuse previously averaged density maps for
        this query; regenerates and caches them otherwise.
        """
        cache = JobCache(current_app.cache, current_app.cache_lock)

        for tract in tracts:
            # average density maps for this tract based on current query
            # save averaged map and cache the file path
            status = cache.job_status(cache_key, tract.code)
            if status == 'COMPLETE':
                # get file path from cache
                tract_file_path = cache.job_result(cache_key, tract.code)
            else:
                # recalculate density map
                file_path_data = dbu.density_map_file_path_data(request_query)
                if len(file_path_data) > 0:
                    current_app.logger.info(f'Adding job {tract.code}')
                    cache.add_job(cache_key, tract.code)
                    data_dir = current_app.config[
                        'DATA_FILE_PATH']  # file path to data folder
                    tract_file_path = du.generate_average_density_map(
                        data_dir, file_path_data, tract, 'MNI')
                    cache.job_complete(cache_key, tract.code, tract_file_path)
                    current_app.logger.info(f'Job {tract.code} complete')
                else:
                    current_app.logger.info(
                        f'No subjects returned for query {json.dumps(request_query, indent=4)}'
                    )
                    return 'No subjects returned for the current query', 404

            # overlap score: percentage of above-threshold tract voxels
            # that also fall inside the lesion mask
            tract_data = du.get_nifti_data(tract_file_path)
            masked_tract_data = ma.masked_where(tract_data < threshold,
                                                tract_data)
            overlap = lesion_data * masked_tract_data
            over_threshold_count = masked_tract_data.count()
            over_threshold_overlap_count = len(overlap.nonzero()[0])
            if over_threshold_overlap_count:
                overlap_percent = (over_threshold_overlap_count /
                                   over_threshold_count) * 100.
                # add dict to intersecting_tracts list
                intersecting_tracts.append({
                    "tractName": tract.name,
                    "tractCode": tract.code,
                    "overlapScore": overlap_percent,
                    "description": tract.description
                })

    '''Can speed up the loop through tracts by using multiprocessing pool'''

    # Get the tract codes common to every dataset/method selected.
    tract_codes = None
    for dc in request_query:
        mc = request_query[dc]['method']
        tcs = DatasetTracts.query.with_entities(
            DatasetTracts.tract_code).filter(
                (DatasetTracts.dataset_code == dc)
                & (DatasetTracts.method_code == mc)).all()
        tcs = set(tcs)
        # Seed from the first dataset, then intersect. The previous
        # `tract_codes = tract_codes or tcs` re-seeded from tcs whenever the
        # running intersection became empty, wrongly resurrecting codes.
        tract_codes = tcs if tract_codes is None else tract_codes & tcs
    # explode the inner tuples
    tract_codes = [tc[0] for tc in (tract_codes or set())]

    if np.any(rh_overlap):
        current_app.logger.info(
            'Checking lesion overlap with right hemisphere tracts.')
        # loop through right hemisphere tracts
        tracts = Tract.query.filter(
            Tract.code.in_(tract_codes)
            & Tract.code.like(r'%\_R')).all()  # escape sql wildcard _
        check_lesion_tract_overlaps(tracts)

    if np.any(lh_overlap):
        current_app.logger.info(
            'Checking lesion overlap with left hemisphere tracts.')
        # loop through left hemisphere tracts
        tracts = Tract.query.filter(
            Tract.code.in_(tract_codes) & Tract.code.like(r'%\_L')).all()
        check_lesion_tract_overlaps(tracts)

    # loop through tracts connecting hemispheres
    current_app.logger.info(
        'Checking lesion overlap with tracts connecting hemispheres.')
    tracts = Tract.query.filter(
        Tract.code.in_(tract_codes) & ~Tract.code.like(r'%\_R')
        & ~Tract.code.like(r'%\_L')).all()  # ~ negates the like
    check_lesion_tract_overlaps(tracts)

    # sort tracts by overlap score (highest to lowest)
    intersecting_tracts = sorted(intersecting_tracts,
                                 key=lambda tract: tract["overlapScore"])[::-1]

    return make_response(jsonify(intersecting_tracts)), 200
Exemplo n.º 5
def get_dynamic_tract_info(tract_code, threshold):
    """Flask view: FA/MD statistics and volume for a tract at a threshold.

    Uses the job cache to obtain (or generate) subject-averaged FA/MD maps
    and the tract's averaged density map for the current query, then
    computes mean/std FA and MD plus the tract volume above ``threshold``.

    ``threshold`` arrives as an integer percentage and is scaled to 0-1.
    Returns 400 for an unparseable query, 404 for an unknown tract, bad
    threshold or empty query, 500 when a concurrent job times out.
    """
    current_app.logger.info(
        f'Getting dynamic tract info for tract {tract_code} and threshold {threshold}.'
    )

    cache = JobCache(current_app.cache, current_app.cache_lock)

    query_string_decoded = request.query_string.decode('utf-8')
    cache_key = cu.construct_cache_key(query_string_decoded)

    # jquery_unparam query string
    # check request query is valid
    request_query = jquery_unparam(query_string_decoded)
    if not check_request_query(request_query):
        current_app.logger.info(
            f'Could not properly parse param string {query_string_decoded} in /generate_mean_maps, returning 400...'
        )
        return 'Could not parse query param string.', 400

    # validate tract code
    tract = dbu.get_tract(tract_code)
    if not tract:
        current_app.logger.info(
            f'Tract with code {tract_code} does not exist, returning 404...')
        return 'The requested tract ' + tract_code + ' does not exist', 404

    # validate threshold
    try:
        # scale threshold to 0 - 1 since density map is stored in this range
        threshold = int(threshold) * 0.01
    except ValueError:
        current_app.logger.info(
            'Invalid threshold value applied, returning 404...')
        return f'Invalid threshold value {threshold} sent to server.', 404

    # check mean_maps job status
    mean_maps_status = cache.add_job_locked(cache_key, 'mean_maps')

    if mean_maps_status in ['PROCEED', 'FAILED', None]:
        # job ready to go or cache could not be accessed
        current_app.logger.info(
            f'mean_maps job status is {mean_maps_status}. Generating mean_maps for query {json.dumps(request_query, indent=4)}'
        )
        subject_ids_dataset_paths = dbu.subject_id_dataset_file_path(
            request_query)

        if len(subject_ids_dataset_paths) > 0:

            if mean_maps_status: cache.job_in_progress(cache_key, 'mean_maps')

            data_dir = current_app.config['DATA_FILE_PATH']
            mean_FA = du.subject_averaged_FA(subject_ids_dataset_paths,
                                             data_dir)
            mean_MD = du.subject_averaged_MD(subject_ids_dataset_paths,
                                             data_dir)

            # BUGFIX: record the generated file paths for the final
            # calculation below; previously this branch never assigned
            # FA_file_path/MD_file_path, raising NameError at the bottom
            # of the function.
            FA_file_path = mean_FA
            MD_file_path = mean_MD

            if mean_maps_status:
                cache.job_complete(cache_key, 'mean_maps', {
                    'FA': mean_FA,
                    'MD': mean_MD
                })

        else:
            # no subjects returned in query
            current_app.logger.info(
                f'No subjects returned for query {json.dumps(request_query, indent=4)}'
            )
            return 'No subjects returned in query', 404

    elif mean_maps_status in ['STAGED', 'IN_PROGRESS']:

        current_app.logger.info(
            'mean_maps job in progress waiting for job to finish...')
        # poll cache until COMPLETE
        # set status to failed if waiting 20 secs
        timeout = 20
        cache.poll_cache(cache_key, 'mean_maps', timeout, 0.2)

        if cache.job_status(cache_key, 'mean_maps') == 'COMPLETE':
            current_app.logger.info('mean_maps job complete')
            # get FA and MD maps from cache
            mean_maps = cache.job_result(cache_key, 'mean_maps')
            FA_file_path = mean_maps.get('FA')
            MD_file_path = mean_maps.get('MD')
        else:
            current_app.logger.warning(
                f'mean_maps job failed to complete in {timeout} secs, setting job status to FAILED and returning...'
            )
            cache.job_failed(cache_key, 'mean_maps')
            return 'mean_maps job FAILED', 500

    elif mean_maps_status == 'COMPLETE':
        current_app.logger.info('mean_maps job complete')
        # get FA and MD maps from cache
        mean_maps = cache.job_result(cache_key, 'mean_maps')
        FA_file_path = mean_maps.get('FA')
        MD_file_path = mean_maps.get('MD')

    # check if tract probability map has been cached or needs to be recreated
    tract_status = cache.add_job_locked(cache_key, tract_code)

    if tract_status in ['PROCEED', 'FAILED',
                        None]:  # new job created or could not access cache
        current_app.logger.info(
            f'{tract_code} job status is {tract_status}, generating new probability map...'
        )
        file_path_data = dbu.density_map_file_path_data(request_query)

        if len(file_path_data) > 0:

            if tract_status:
                current_app.logger.info(
                    f'Adding {tract_code} job for query {json.dumps(request_query, indent=4)}'
                )
                cache.job_in_progress(cache_key, tract_code)
            else:
                current_app.logger.info(
                    f'Calculating probability map for tract {tract_code} and query {json.dumps(request_query, indent=4)}'
                )

            data_dir = current_app.config[
                'DATA_FILE_PATH']  # file path to data folder
            tract_file_path = du.generate_average_density_map(
                data_dir, file_path_data, tract, 'MNI')

            if tract_status:
                cache.job_complete(cache_key, tract_code, tract_file_path)
                current_app.logger.info(
                    f'{tract_code} job complete for query {json.dumps(request_query, indent=4)}'
                )
            else:
                current_app.logger.info(
                    f'Completed probabilty map for tract {tract_code} and query {json.dumps(request_query, indent=4)}'
                )

        else:
            current_app.logger.info(
                f'No subjects returned for query {json.dumps(request_query, indent=4)}'
            )
            return "No subjects returned for the current query", 404

    elif tract_status in ['STAGED',
                          'IN_PROGRESS']:  # another worker is running the job

        current_app.logger.info(
            f'{tract_code} job in progress, waiting to complete...')
        # poll cache waiting for complete status (max wait 10 secs before quitting)
        timeout = 10
        cache.poll_cache(cache_key, tract_code, timeout, 0.2)

        # set status to FAILED if not COMPLETE after 10 secs
        if cache.job_status(cache_key, tract_code) == 'COMPLETE':
            tract_file_path = cache.job_result(cache_key, tract_code)
        else:
            current_app.logger.warning(
                f'{tract_code} job did not complete in {timeout} secs, setting job status to FAILED.'
            )
            cache.job_failed(cache_key, tract_code)
            return f'Job {tract_code} timed out for query {json.dumps(request_query, indent=4)}.', 500

    elif tract_status == 'COMPLETE':  # job has already been completed

        current_app.logger.info(f'{tract_code} job complete.')
        # job has already been run, get file_path from cache
        tract_file_path = cache.job_result(cache_key, tract_code)

    # calculate results and return
    FA_map_data = du.get_nifti_data(FA_file_path)
    MD_map_data = du.get_nifti_data(MD_file_path)
    tract_data = du.get_nifti_data(tract_file_path)
    mean_FA, std_FA = du.averaged_tract_mean_std(FA_map_data, tract_data,
                                                 threshold)
    mean_MD, std_MD = du.averaged_tract_mean_std(MD_map_data, tract_data,
                                                 threshold)
    vol = du.averaged_tract_volume(tract_data, threshold)

    results = {}
    results['tractCode'] = tract_code
    results['tractName'] = tract.name
    results['volume'] = vol
    results['meanFA'] = mean_FA
    results['stdFA'] = std_FA
    results['meanMD'] = mean_MD
    results['stdMD'] = std_MD

    return jsonify(results)