Example #1
def afq_process_subject(subject):
    # define a function that each job will run
    # In this case, each process does a single subject
    import logging
    import s3fs
    # all imports must be at the top of the function
    # cloudknot installs the appropriate packages from pip
    import AFQ.data as afqd
    import AFQ.api as api
    import AFQ.definitions.mask as afm

    # set logging level to your choice
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    # Download the given subject to your local machine from s3
    # Can find subjects more easily if they are specified in a
    # BIDS participants.tsv file, even if it is sparse
    study_ixi = afqd.S3BIDSStudy("my_study",
                                 "my_study_bucket",
                                 "my_study_prefix",
                                 subjects=[subject],
                                 use_participants_tsv=True,
                                 anon=False)
    study_ixi.download("local_bids_dir", include_derivs=["pipeline_name"])

    # you can optionally provide your own segmentation file
    # in this case, we look for a file with suffix 'seg'
    # in the 'pipeline_name' pipeline,
    # and we consider all non-zero labels to be a part of the brain
    brain_mask = afm.LabelledMaskFile('seg', {'scope': 'pipeline_name'},
                                      exclusive_labels=[0])

    # define the api AFQ object
    myafq = api.AFQ(
        "local_bids_dir",
        dmriprep="pipeline_name",
        brain_mask=brain_mask,
        viz_backend='plotly',  # this will generate both interactive html and GIFs
        scalars=["dki_fa", "dki_md"])

    # export_all runs the entire pipeline and creates many useful derivatives
    myafq.export_all()

    # upload the results to some location on s3
    myafq.upload_to_s3(s3fs.S3FileSystem(),
                       f"my_study_bucket/my_study_prefix/derivatives/afq")
Example #2
def afq_hcp(subject, aws_access_key, aws_secret_key, hcp_aws_access_key,
            hcp_aws_secret_key, outbucket):

    # all imports must be at the top of the function
    # cloudknot installs the appropriate packages from pip
    import logging
    import os.path as op

    import s3fs

    from AFQ.data import fetch_hcp
    import AFQ.api as api
    import AFQ.definitions.mask as afm

    fs = s3fs.S3FileSystem(key=aws_access_key, secret=aws_secret_key)

    # Configuration:

    # session = "1200"
    session = "Retest"

    seg_algo = "reco80"
    reuse_tractography = False
    bundle_info = None
    shell = "multi"
    my_hcp_key = f"{outbucket}/hcp_reliability"
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__) # noqa
    log.info(f"Subject: {subject}")
    remote_export_path = (
        f"{my_hcp_key}/{shell}_shell/"
        f"hcp_{session.lower()}_{seg_algo}_csd_det_azure")


    # get HCP data for the given subject / session
    _, hcp_bids = fetch_hcp(
        [subject],
        profile_name=False,
        aws_access_key_id=hcp_aws_access_key,
        aws_secret_access_key=hcp_aws_secret_key)

    tracking_params = {
        'seed_mask': afm.ScalarMask('dki_fa'),
        'stop_mask': afm.ScalarMask('dki_fa'),
        "odf_model": "CSD",
        "directions": "det"}
    kwargs = {
        "scalars": ["dki_fa", "dki_md"]
    }

    # Whether to reuse a previous tractography that has already been
    # uploaded to s3 by another run of this function. Useful if you want to
    # try new parameters that do not change the tractography.
    if reuse_tractography:
        rpath = (
            f"{my_hcp_key}/{shell}_shell/"
            f"hcp_{session.lower()}_afq/sub-{subject}/ses-01/"
            f"sub-{subject}_dwi_space-RASMM_model-"
            f"{tracking_params['odf_model']}_desc-prob_tractography.trk")
        lpath = (
            f"derivatives/dmriprep/sub-{subject}/"
            f"ses-01/sub-{subject}_customtrk.trk")
        if fs.exists(rpath):
            log.info(f"Gettng {rpath}")
            fs.get(
                rpath,
                op.join(hcp_bids, lpath))
        else:
            raise ValueError(f"Could not find {rpath}")

        custom_tractography_bids_filters = {
            "suffix": "customtrk", "scope": "dmriprep"}
    else:
        custom_tractography_bids_filters = None

    # Initialize the AFQ object with all of the parameters we have set so
    # far. Also uses the brain mask provided by HCP. Sets viz_backend='plotly'
    # to make GIFs in addition to the default html visualizations (this adds
    # ~45 minutes).
    myafq = api.AFQ(
        hcp_bids,
        brain_mask=afm.LabelledMaskFile(
                    'seg', {'scope': 'dmriprep'}, exclusive_labels=[0]),
        custom_tractography_bids_filters=custom_tractography_bids_filters,
        tracking_params=tracking_params,
        bundle_info=bundle_info,
        segmentation_params={"seg_algo": seg_algo, "reg_algo": "syn"},
        viz_backend='plotly',
        **kwargs)
    # run the AFQ object
    myafq.export_all()
    myafq.upload_to_s3(fs, remote_export_path)
Example #3
def afq_process_subject(subject, seed_mask, n_seeds,
                        aws_access_key, aws_secret_key):
    # define a function that each job will run
    # In this case, each process does a single subject
    import logging
    import s3fs
    # all imports must be at the top of the function
    # cloudknot installs the appropriate packages from pip
    from AFQ.data import fetch_hcp
    import AFQ.api as api
    import AFQ.definitions.mask as afm

    import numpy as np
    import os.path as op

    # set logging level to your choice
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    # Download the given subject to the AWS Batch machine from s3
    _, hcp_bids = fetch_hcp(
        [subject],
        profile_name=False,
        study=f"HCP_1200",
        aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key)

    # We make a new seed mask for each process based off of the
    # seed_mask argument, which is a string.
    # This is to avoid any complications with pickling the masks.
    if seed_mask == "roi":
        seed_mask_obj = afm.RoiMask()
    elif seed_mask == "fa":
        seed_mask_obj = afm.ScalarMask("dti_fa")
    else:
        seed_mask_obj = afm.FullMask()

    # Determine whether n_seeds is seeds per voxel
    # or a total number of random seeds
    if n_seeds > 3:
        random_seeds = True
    else:
        random_seeds = False

    # set the tracking_params based off our inputs
    tracking_params = {
        "seed_mask": seed_mask_obj,
        "n_seeds": n_seeds,
        "random_seeds": random_seeds}

    # use segmentation file from HCP to get a brain mask,
    # where everything not labelled 0 is considered a part of the brain
    brain_mask = afm.LabelledMaskFile(
        'seg', {'scope': 'dmriprep'}, exclusive_labels=[0])

    # define the api AFQ object
    myafq = api.AFQ(
        hcp_bids,
        brain_mask=brain_mask,
        tracking_params=tracking_params)

    # export_all runs the entire pipeline and creates many useful derivatives
    myafq.export_all()

    # upload the results to some location on s3
    myafq.upload_to_s3(
        s3fs.S3FileSystem(),
        (f"my_study_bucket/my_study_prefix_{seed_mask}_{n_seeds}"
        f"/derivatives/afq"))
Example #4
def afq_hcp(subject, aws_access_key, aws_secret_key, hcp_aws_access_key,
            hcp_aws_secret_key, outbucket):

    # all imports must be at the top of the function
    # cloudknot installs the appropriate packages from pip
    import logging
    import os.path as op

    import s3fs

    from AFQ.data import fetch_hcp
    import AFQ.api as api
    import AFQ.definitions.mask as afm

    # configure logging before the first log call so log.info is not suppressed
    logging.basicConfig(level=logging.INFO)

    fs = s3fs.S3FileSystem(key=aws_access_key, secret=aws_secret_key)
    log = logging.getLogger(__name__)  # noqa
    log.info(f"Subject: {subject}")

    # Only do it if the output file doesn't already exist:
    if not fs.exists(f"hcp.afq/derivatives/afq/sub-{subject}/"
                     f"ses-01/sub-{subject}"
                     "_dwi_space-RASMM_model-CSD_desc-prob-"
                     "afq_profiles.csv"):
        # Configuration:
        # session = "Retest"
        session = "1200"
        seg_algo = "afq"
        reuse_tractography = True
        bundle_info = api.BUNDLES + api.CALLOSUM_BUNDLES
        shell = "multi"
        my_hcp_key = f"{outbucket}/derivatives/afq/"
        logging.basicConfig(level=logging.INFO)
        remote_export_path = my_hcp_key

        # get HCP data for the given subject / session
        _, hcp_bids = fetch_hcp([subject],
                                profile_name=False,
                                aws_access_key_id=hcp_aws_access_key,
                                aws_secret_access_key=hcp_aws_secret_key)

        tracking_params = {
            'seed_mask': afm.ScalarMask('dki_fa'),
            'stop_mask': afm.ScalarMask('dki_fa'),
            "odf_model": "CSD",
            "directions": "prob"
        }
        kwargs = {"scalars": ["dki_fa", "dki_md", "dki_mk", "dki_awf"]}

        # Whether to reuse a previous tractography that has already been
        # uploaded to s3 by another run of this function. Useful if you want to
        # try new parameters that do not change the tractography.
        custom_tractography_bids_filters = None

        if reuse_tractography:
            rpath = (f"profile-hcp-west/hcp_reliability/multi_shell/"
                     f"hcp_{session.lower()}_reco80_csd_azure/sub-{subject}"
                     f"/ses-01/sub-{subject}_dwi_space-RASMM"
                     f"_model-CSD_desc-prob_tractography.trk")
            #  rpath=(
            #      f"{my_hcp_key}/{shell}_shell/"
            #      f"hcp_{session.lower()}_afq/sub-{subject}/ses-01/"
            #      f"sub-{subject}_dwi_space-RASMM_model-"
            #      f"{tracking_params['odf_model']}_desc-prob_tractography.trk")
            lpath = (f"derivatives/dmriprep/sub-{subject}/"
                     f"ses-01/sub-{subject}_customtrk.trk")
            if fs.exists(rpath):
                log.info(f"Gettng {rpath}")
                fs.get(rpath, op.join(hcp_bids, lpath))
                custom_tractography_bids_filters = {
                    "suffix": "customtrk",
                    "scope": "dmriprep"
                }

        # Initialize the AFQ object with all of the parameters we have set so
        # far. Also uses the brain mask provided by HCP. Sets viz_backend='plotly'
        # to make GIFs in addition to the default html visualizations (this adds
        # ~45 minutes).
        myafq = api.AFQ(
            hcp_bids,
            brain_mask=afm.LabelledMaskFile('seg', {'scope': 'dmriprep'},
                                            exclusive_labels=[0]),
            custom_tractography_bids_filters=custom_tractography_bids_filters,
            tracking_params=tracking_params,
            bundle_info=bundle_info,
            segmentation_params={
                "seg_algo": seg_algo,
                "reg_algo": "syn"
            },
            viz_backend='plotly',
            **kwargs)
        # run the AFQ object
        log.info("Running the pyAFQ pipeline")
        myafq.export_all(afqbrowser=False, xforms=False)
        log.info(f"Uploading to {remote_export_path}")
        myafq.upload_to_s3(fs, remote_export_path)
    else:
        log.info(f"Already completed analysis for this subject")