Esempio n. 1
0
def projpvalonatlas(atlas, pars, ipsi, tt_pval, outdir):
    """Project paired t-test p-values onto an atlas label volume.

    For each parameter in `pars`, build a volume where every voxel of an
    ipsilateral label is replaced by that label's p-value (background stays
    at 1.0, i.e. non-significant), save it as a NIfTI, then re-orient all
    outputs to ASR with c3d.

    atlas   : path to the atlas label NIfTI
    pars    : parameter names, one output volume per entry
    ipsi    : ipsilateral label intensities, indexed like tt_pval[p]
    tt_pval : per-parameter sequences of p-values (tt_pval[p][t] pairs
              with ipsi[t]) -- assumed aligned; confirm against caller
    outdir  : output directory for the generated NIfTI files
    """
    nii = nib.load('%s' % atlas)
    img = nii.get_data()

    # replace label intensities with p-values
    for par, pvals in zip(pars, tt_pval):

        # start from 1.0 everywhere so untouched voxels read as p = 1
        newimg = np.ones(img.shape)

        for lblint, pval in zip(ipsi, pvals):
            newimg[img == lblint] = pval

            # # if both hemispheres are wanted:
            # # newimg[img == contra[t]] = pval

        # 25 um isotropic voxels; keep homogeneous coordinate at 1
        mat = np.eye(4) * 0.025
        mat[3, 3] = 1

        newnii = nib.Nifti1Image(newimg, mat)
        niiname = '%s/%s_paired_ttest_pval.nii.gz' % (outdir, par)
        nib.save(newnii, niiname)

    # orient output niftis to ASR
    with add_paths():
        for par in pars:
            niiname = '%s/%s_paired_ttest_pval.nii.gz' % (outdir, par)
            call(["c3d", niiname, "-orient", "ASR", "-o", niiname])
Esempio n. 2
0
def main(args):
    """Generate a grand-parent label volume from the ARA annotation.

    Reads the Allen (ARA) annotation for the requested hemisphere/resolution,
    maps every label to its ancestor at parent-level `pl` using the ontology
    structure graph, saves the result, and re-orients it to ASR with c3d.
    """
    start = datetime.now()

    pl, hemi, res = parse_inputs(parsefn(), args)

    # load annotations
    print("Reading ARA annotation with %s hemispheres and %d voxel size" %
          (hemi, res))

    miracl_home = os.environ['MIRACL_HOME']
    annot_path = '%s/atlases/ara/annotation/annotation_hemi_%s_%dum.nii.gz' % (
        miracl_home, hemi, res)
    annot_img = nib.load(annot_path)
    annot_data = annot_img.get_data()

    # load structure graph (hemisphere-split ontology)
    print("Reading ARA ontology structure_graph")
    graph = pd.read_csv(
        "%s/atlases/ara/ara_mouse_structure_graph_hemi_split.csv" % miracl_home)

    # all label intensities present in the volume
    lbls = getalllbls(annot_data)

    print("Computing parent labels at parent-level/generation %d" % pl)

    # work on a copy so the original annotation data stays intact
    # NOTE(review): lblsplit / maxannotlbl are not defined in this scope;
    # presumably module-level constants -- confirm.
    parent_data = getlblparent(graph, lbls, pl, np.copy(annot_data), lblsplit,
                               maxannotlbl)

    voxel = annot_img.header.get_zooms()[0]
    outnii = '%s_parent-level_%s.nii.gz' % (basename(annot_path).split('.')[0],
                                            pl)
    saveniiparents(parent_data, voxel, outnii)

    # re-orient to ASR, cast to unsigned short
    with add_paths():
        call([
            "c3d",
            "%s" % outnii, "-orient", "ASR", "-type", "ushort", "-o",
            "%s" % outnii
        ])

    # (origin-setting against the 50um template was intentionally disabled:
    #  c3d ... -origin "-11.4x0x0mm")

    print(
        "\n Grand-parent labels generation done in %s ... Have a good day!\n" %
        (datetime.now() - start))
Esempio n. 3
0
def main(args):
    """Replace labels in an annotation volume with ancestors at a given depth.

    Loads either the Allen (ARA) annotation or a user-supplied label volume,
    maps each label to its parent at ontology depth `d`, saves the result,
    and (Allen only) re-orients the output and sets its origin with c3d.
    """
    starttime = datetime.now()

    parser = parsefn()
    d, inlbls, hemi, res = parse_inputs(parser, args)

    miracl_home = os.environ['MIRACL_HOME']

    if inlbls == "Allen":

        # load annotations shipped with MIRACL
        print("Reading ARA annotation with %s hemispheres and %d voxel size" % (hemi, res))

        nii = '%s/atlases/ara/annotation/annotation_hemi_%s_%dum.nii.gz' % (miracl_home, hemi, res)

    else:
        print("Reading input labels")
        nii = inlbls

    img = nib.load(nii)
    data = img.get_data()

    # load structure graph (hemisphere-split ontology)
    print("Reading ARA ontology structure_graph")
    arastrctcsv = "%s/atlases/ara/ara_mouse_structure_graph_hemi_split.csv" % miracl_home
    aragraph = pd.read_csv(arastrctcsv)

    # all label intensities present in the volume
    lbls = getalllbls(data)

    # work on a copy so `data` keeps the original labels for lookups
    parentdata = np.copy(data)

    print("Computing parent labels at depth %d" % d)

    # NOTE(review): lblsplit and maxannotlbl are not defined in this scope;
    # presumably module-level constants -- confirm.
    pastparents = getpastlabelsdepth(aragraph, lbls, d, lblsplit, maxannotlbl)

    # BUGFIX: dict.iteritems() does not exist in Python 3 (the file already
    # uses Python-3 print functions) -- use items() instead.
    for pastlbl, pastparent in pastparents.items():
        replacechildren(data, parentdata, pastlbl, pastparent)

    vx = img.header.get_zooms()[0]
    orgname = basename(nii).split('.')[0]
    outnii = '%s_depth_%s.nii.gz' % (orgname, d)
    saveniiparents(parentdata, vx, outnii)

    if inlbls == "Allen":
        # re-orient to ASR and set the origin to match the Allen template
        with add_paths():
            call(["c3d", "%s" % outnii, "-orient", "ASR", "-type", "ushort", "-o", "%s" % outnii])

            call(["c3d", "%s" % outnii, "-origin", "-11.4x0x0mm", "-o", "%s" % outnii])

    print("\n Parent labels at depth done in %s ... Have a good day!\n" % (datetime.now() - starttime))
Esempio n. 4
0
def main(args=None):
    """Main command line interface (CLI) call. Parse all arguments, perform required action.
    If no commands are passed, return to GUI."""
    args = sys.argv[1:] if args is None else args

    # make sure MIRACL_HOME points at the directory containing this CLI file
    if 'MIRACL_HOME' not in os.environ:
        cli_dir = Path(os.path.realpath(__file__)).parents[0]
        os.environ['MIRACL_HOME'] = '%s' % cli_dir

    parser = get_parser()
    argcomplete.autocomplete(parser)
    parsed = parser.parse_args(args)

    # dispatch to the subcommand handler with the tool paths set up
    with depends_manager.add_paths():
        parsed.func(parser, parsed)
Esempio n. 5
0
def main(args):
    """Download an Allen connectivity experiment's projection density for a label.

    Finds an injection experiment covering label `lbl` (falling back to its
    ontology ancestors when none exists), downloads and saves its projection
    density volume, queries the structural connectivity graph, and exports a
    connectivity CSV plus a projection map.
    """
    starttime = datetime.now()

    # parse in args
    parser = parsefn()
    lbl, trans, projmet = parse_inputs(parser, args)

    [cutoff, miracl_home, annot_csv, exclude] = initialize()

    mcc = MouseConnectivityCache(
        manifest_file=
        '%s/connect/connectivity_exps/mouse_connectivity_manifest.json' %
        miracl_home)

    # Load all injection experiments from Allen api
    all_experiments = mcc.get_experiments(dataframe=True)

    if trans:

        print("\n Searching for experiments with the %s mouse line" % trans)

        projexps = all_experiments[all_experiments['transgenic-line'] == "%s" %
                                   trans]

    else:

        print(
            "\n Searching for experiments with the wild-type (C57BL/6J) strain"
        )

        # wild-type strain only
        projexps = all_experiments[all_experiments['strain'] == "C57BL/6J"]

        # no transgenic mice
        projexps = projexps[projexps['transgenic-line'] == ""]

    # ---------------

    # Check if labels have injection exps
    print(
        "\n Checking if labels have injection experiments in the connectivity search"
    )

    def _first_exp(df, column, value):
        """First matching experiment index, or None when no row matches."""
        matches = df[df[column] == value].id.index
        return matches[0] if len(matches) > 0 else None

    # BUGFIX: the original indexed `.index[0]` unconditionally, which raises
    # IndexError on an empty match and can never yield None -- the parent
    # fallback loop below was dead code.
    inj_exp = _first_exp(projexps, 'structure-abbrev', lbl)

    # fall back to ancestors in the ontology until an experiment is found
    # NOTE(review): matching annot_csv.id against `lbl` (an abbreviation) is
    # kept from the original -- confirm the intended key.
    cur = lbl
    while inj_exp is None:
        pid = annot_csv.parent_structure_id[annot_csv.id == cur].values[0]
        inj_exp = _first_exp(projexps, 'structure-id', pid)
        # BUGFIX: advance up the tree so a parent without experiments does
        # not spin forever on the same lookup.
        cur = pid

    # ---------------

    # Get projection density
    projd = getprojden(mcc, inj_exp)

    print(
        "\n Downloading projection density volume for experiment %d of lbl %s"
        % (inj_exp, lbl))

    outpd = '%s_exp%s_projection_density_image.nii.gz' % (lbl, inj_exp)
    outtif = '%s_exp%s_projection_density_image.tif' % (lbl, inj_exp)

    # 25 um voxel size
    vx = 0.025

    savenii(projd, vx, outpd)
    savetiff(projd, outtif)

    # re-orient to ASR
    with add_paths():
        call(["c3d", "%s" % outpd, "-orient", "ASR", "-o", "%s" % outpd])

    # ---------------

    # Get connectivity graph from the Allen API, sorted by the chosen metric
    print(
        "\n Quering structural connectivity of injection labels in the Allen API & sorting by %s"
        % projmet)

    [all_connect_ids, all_norm_proj] = query_connect(inj_exp, cutoff, exclude,
                                                     mcc, projmet)

    # save csv
    export_connect_abv = saveconncsv(all_connect_ids, annot_csv, lbl, inj_exp,
                                     projmet)

    # compute & save proj map
    exportprojmap(all_norm_proj, export_connect_abv, lbl, inj_exp, projmet)

    print(
        "\n Downloading connectivity graph & projection map done in %s ... Have a good day!\n"
        % (datetime.now() - starttime))
Esempio n. 6
0
def main(args):
    """Extract per-label intensity statistics from a volume and annotate them.

    Runs `c3d -lstat` on the input volume against the registered label
    image, merges Allen ontology metadata (name, acronym, parent, path id)
    into the resulting table, sorts it by the chosen column, and writes it
    back to `outfile` as CSV.
    """
    # parse in args
    parser = parsefn()
    invol, lbls, outfile, sort, hemi, label_depth = parse_inputs(parser, args)

    # extract stats
    print(" Extracting stats from input volume using registered labels ...\n")

    # subprocess.check_call('ImageIntensityStatistics 3 %s %s > %s' % (invol, lbls, outfile), shell=True,
    #                       stdout=subprocess.PIPE,
    #                       stderr=subprocess.PIPE)

    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if the paths come from untrusted input -- here they are CLI
    # arguments; the shell is needed for the `>` redirection.
    with add_paths():
        subprocess.check_call('c3d %s %s -lstat > %s' % (invol, lbls, outfile), shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)

    # c3d -lstat emits a fixed-width table; parse it back in
    out_stats = pd.read_fwf('%s' % outfile)

    # read Allen ontology
    miracl_home = os.environ['MIRACL_HOME']

    # combined or hemisphere-split label graph
    if hemi == "combined":
        annot_csv = pd.read_csv('%s/atlases/ara/ara_mouse_structure_graph_hemi_combined.csv' % miracl_home)
    else:
        annot_csv = pd.read_csv('%s/atlases/ara/ara_mouse_structure_graph_hemi_split.csv' % miracl_home)

    # extract labels at certain depth only
    if label_depth is not None:
        annot_csv = annot_csv[annot_csv.depth == label_depth]

    # Add label Name, Abrv, PathID

    # id -> metadata lookup dicts
    name_dict = annot_csv.set_index('id')['name'].to_dict()
    acronym_dict = annot_csv.set_index('id')['acronym'].to_dict()
    pathid_dict = annot_csv.set_index('id')['structure_id_path'].to_dict()
    parent_dict = annot_csv.set_index('id')['parent_structure_id'].to_dict()

    # replace label ids with ontology info; ids absent from the dicts keep
    # their numeric value (those rows are dropped further below)
    out_stats['name'] = out_stats.LabelID.replace(name_dict)
    out_stats['acronym'] = out_stats.LabelID.replace(acronym_dict)
    out_stats['parent'] = out_stats.LabelID.replace(parent_dict)
    out_stats['pathid'] = out_stats.LabelID.replace(pathid_dict)

    # sort data-frame by the requested stat, descending
    out_stats = out_stats.sort_values([sort], ascending=False)
    # remove background (label 0)
    out_stats = out_stats[out_stats['LabelID'] != 0]

    # re-order columns: info columns first, then the sort column, then the
    # remaining stats (np.unique keeps the first occurrence of each name)
    cols = ['LabelID', 'acronym', 'name', 'parent', sort]
    df_cols = out_stats.columns.values
    all_cols = np.hstack([cols, df_cols])
    _, idx = np.unique(all_cols, return_index=True)
    columns = all_cols[np.sort(idx)]

    out_stats = out_stats[columns]

    # remove labels not in allen graph (prob interp errors!) -- their `name`
    # column is still numeric because replace() left it unmapped
    out_stats = out_stats[~out_stats.name.apply(lambda x: np.isreal(x))]

    # save to csv
    out_stats.to_csv('%s' % outfile, index=False)