Code Example #1
File: wiener.py  Project: bbfrederick/rapidtide
def _procOneVoxelWiener(vox,
                        lagtc,
                        inittc,
                        rt_floatset=np.float64,
                        rt_floattype='float64'):
    thefit, R = tide_fit.mlregress(lagtc, inittc)
    fitcoff = rt_floatset(thefit[0, 1])
    datatoremove = rt_floatset(fitcoff * lagtc)
    return vox, rt_floatset(thefit[0, 0]), rt_floatset(R), rt_floatset(R * R), fitcoff, \
           rt_floatset(thefit[0, 1] / thefit[0, 0]), datatoremove, rt_floatset(inittc - datatoremove)
Code Example #2
def _procOneVoxelWiener(vox, lagtc, inittc, rt_floatset=np.float64, rt_floattype="float64"):
    thefit, R = tide_fit.mlregress(lagtc, inittc)
    fitcoff = rt_floatset(thefit[0, 1])
    datatoremove = rt_floatset(fitcoff * lagtc)
    return (
        vox,
        rt_floatset(thefit[0, 0]),
        rt_floatset(R),
        rt_floatset(R * R),
        fitcoff,
        rt_floatset(thefit[0, 1] / thefit[0, 0]),
        datatoremove,
        rt_floatset(inittc - datatoremove),
    )
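For orientation, here is a minimal, self-contained numpy sketch of what this per-voxel fit computes.  It stands in for tide_fit.mlregress, which (as used here) is assumed to return a row vector [intercept, slope] plus the correlation coefficient R; all data below are synthetic.

import numpy as np

# synthetic voxel: inittc is a scaled copy of the shifted regressor plus noise
rng = np.random.default_rng(0)
lagtc = rng.standard_normal(100)
inittc = 2.5 * lagtc + 0.3 + 0.1 * rng.standard_normal(100)

# equivalents of thefit[0, 0] (intercept), thefit[0, 1] (slope), and R
slope, intercept = np.polyfit(lagtc, inittc, 1)
R = np.corrcoef(lagtc, inittc)[0, 1]

datatoremove = slope * lagtc        # the fitted regressor contribution
filtered = inittc - datatoremove    # the last value the function returns
print(intercept, slope, R, R * R, slope / intercept)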
Code Example #3
def confoundglm(
    data,
    regressors,
    debug=False,
    showprogressbar=True,
    reportstep=1000,
    rt_floatset=np.float64,
    rt_floattype="float64",
):
    r"""Filters multiple regressors out of an array of data

    Parameters
    ----------
    data : 2d numpy array
        A data array.  First index is the spatial dimension, second is the time (filtering) dimension.

    regressors : 2d numpy array
        The set of regressors to filter out of each timecourse.  The first dimension is the regressor number, second is the time (filtering) dimension.

    debug : boolean
        Print additional diagnostic information if True

    Returns
    -------
    filtereddata : 2d numpy array
        The input data with the fit to the regressors subtracted from each timecourse.
    """
    if debug:
        print("data shape:", data.shape)
        print("regressors shape:", regressors.shape)
    datatoremove = np.zeros(data.shape[1], dtype=rt_floattype)
    filtereddata = data * 0.0
    for i in range(data.shape[0]):
        if showprogressbar and (i > 0) and (i % reportstep == 0 or i == data.shape[0] - 1):
            tide_util.progressbar(i + 1, data.shape[0], label="Percent complete")
        datatoremove *= 0.0
        thefit, R = tide_fit.mlregress(regressors, data[i, :])
        if i == 0 and debug:
            print("fit shape:", thefit.shape)
        for j in range(regressors.shape[0]):
            datatoremove += rt_floatset(rt_floatset(thefit[0, 1 + j]) * regressors[j, :])
        filtereddata[i, :] = data[i, :] - datatoremove
    return filtereddata
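The same confound-removal idea can be expressed with numpy alone; this is a sketch, with np.linalg.lstsq standing in for tide_fit.mlregress and all names and data illustrative.

import numpy as np

rng = np.random.default_rng(0)
nvox, ntime, nreg = 5, 200, 3
regressors = rng.standard_normal((nreg, ntime))
data = rng.standard_normal((nvox, ntime)) + 1.5 * regressors[0, :]

# design matrix: an intercept column plus one column per regressor
X = np.column_stack([np.ones(ntime), regressors.T])
filtereddata = np.empty_like(data)
for i in range(nvox):
    coeffs, _, _, _ = np.linalg.lstsq(X, data[i, :], rcond=None)
    # subtract only the regressor contributions, not the intercept,
    # mirroring the datatoremove accumulation above
    filtereddata[i, :] = data[i, :] - X[:, 1:] @ coeffs[1:]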
Code Example #4
def _procOneItemGLM(vox,
                    theevs,
                    thedata,
                    rt_floatset=np.float64,
                    rt_floattype="float64"):
    thefit, R = tide_fit.mlregress(theevs, thedata)
    fitcoeff = rt_floatset(thefit[0, 1])
    datatoremove = rt_floatset(fitcoeff * theevs)
    return (
        vox,
        rt_floatset(thefit[0, 0]),
        rt_floatset(R),
        rt_floatset(R * R),
        fitcoeff,
        rt_floatset(thefit[0, 1] / thefit[0, 0]),
        datatoremove,
        rt_floatset(thedata - datatoremove),
    )
Code Example #5
File: glmpass.py  Project: bbfrederick/rapidtide
def confoundglm(data,
                 regressors,
                 debug=False,
                 showprogressbar=True,
                 reportstep=1000,
                 rt_floatset=np.float64,
                 rt_floattype='float64'):
    r"""Filters multiple regressors out of an array of data in place

    Parameters
    ----------
    data : 2d numpy array
        A data array.  First index is the spatial dimension, second is the time (filtering) dimension.

    regressors : 2d numpy array
        The set of regressors to filter out of each timecourse.  The first dimension is the regressor number, second is the time (filtering) dimension.

    debug : boolean
        Print additional diagnostic information if True

    Returns
    -------
    filtereddata : 2d numpy array
        The input data with the fit to the regressors subtracted from each timecourse.
    """
    if debug:
        print('data shape:', data.shape)
        print('regressors shape:', regressors.shape)
    datatoremove = np.zeros(data.shape[1], dtype=rt_floattype)
    filtereddata = data * 0.0
    for i in range(data.shape[0]):
        if showprogressbar and (i > 0) and (i % reportstep == 0 or i == data.shape[0] - 1):
            tide_util.progressbar(i + 1, data.shape[0], label='Percent complete')
        datatoremove *= 0.0
        thefit, R = tide_fit.mlregress(regressors, data[i, :])
        if i == 0 and debug:
            print('fit shape:', thefit.shape)
        for j in range(regressors.shape[0]):
            datatoremove += rt_floatset(rt_floatset(thefit[0, 1 + j]) * regressors[j, :])
        filtereddata[i, :] = data[i, :] - datatoremove
    return filtereddata
Code Example #6
def glmfilt(inputfile, numskip, outputroot, evfilename):

    # initialize some variables
    evdata = []
    evisnifti = []
    thedims_in = []
    thedims_ev = []
    thesizes_ev = []

    # read the datafile and the evfiles
    nim_input, nim_data, nim_header, thedims_in, thesizes_in = tide_io.readfromnifti(inputfile)
    xdim, ydim, slicedim, tr = tide_io.parseniftisizes(thesizes_in)
    print(xdim, ydim, slicedim, tr)
    xsize, ysize, numslices, timepoints = tide_io.parseniftidims(thedims_in)
    print(xsize, ysize, numslices, timepoints)

    numregressors = 0
    for i in range(0, len(evfilename)):
        print("file ", i, " has name ", evfilename[i])
        # check to see if file is nifti or text
        fileisnifti = tide_io.checkifnifti(evfilename[i])
        fileisparfile = tide_io.checkifparfile(evfilename[i])
        if fileisnifti:
            # if file is nifti
            print("reading voxel specific regressor from ", evfilename[i])
            (
                nim_evinput,
                ev_data,
                ev_header,
                thedims_evinput,
                thesizes_evinput,
            ) = tide_io.readfromnifti(evfilename[i])
            evisnifti.append(True)
            evdata.append(1.0 * ev_data)
            thedims_ev.append(thedims_evinput)
            thesizes_ev.append(thesizes_evinput)
            numregressors += 1
        elif fileisparfile:
            # check to see if file is a par file
            print("reading 6 global regressors from an FSL parfile")
            evtimeseries = tide_io.readvecs(evfilename[i])
            print("timeseries length = ", len(evtimeseries[0, :]))
            for j in range(0, 6):
                evisnifti.append(False)
                evdata.append(1.0 * evtimeseries[j, :])
                thedims_evinput = 1.0 * thedims_in
                thesizes_evinput = 1.0 * thesizes_in
                thedims_ev.append(thedims_evinput)
                thesizes_ev.append(thesizes_evinput)
                numregressors += 1
        else:
            # if file is text
            print("reading global regressor from ", evfilename[i])
            evtimeseries = tide_io.readvec(evfilename[i])
            print("timeseries length = ", len(evtimeseries))
            evisnifti.append(False)
            evdata.append(1.0 * evtimeseries)
            thedims_evinput = 1.0 * thedims_in
            thesizes_evinput = 1.0 * thesizes_in
            thedims_ev.append(thedims_evinput)
            thesizes_ev.append(thesizes_evinput)
            numregressors += 1

    for j in range(0, numregressors):
        for i in range(0, 4):
            if thedims_in[i] != thedims_ev[j][i]:
                print("Input file and ev file ", j, " dimensions do not match")
                print("dimension ", i, ":", thedims_in[i], " != ", thedims_ev[j][i])
                exit()
        if timepoints - numskip != thedims_ev[j][4]:
            print("Input file and ev file ", j, " dimensions do not match")
            print("dimension ", 4, ":", timepoints, "!= ", thedims_ev[j][4], "+", numskip)
            exit()

    print("will perform GLM with ", numregressors, " regressors")
    meandata = np.zeros((xsize, ysize, numslices), dtype="float")
    fitdata = np.zeros((xsize, ysize, numslices, numregressors), dtype="float")
    Rdata = np.zeros((xsize, ysize, numslices), dtype="float")
    trimmeddata = 1.0 * nim_data[:, :, :, numskip:]

    for z in range(0, numslices):
        print("processing slice ", z)
        for y in range(0, ysize):
            for x in range(0, xsize):
                regressorvec = []
                for j in range(0, numregressors):
                    if evisnifti[j]:
                        regressorvec.append(evdata[j][x, y, z, :])
                    else:
                        regressorvec.append(evdata[j])
                if np.max(trimmeddata[x, y, z, :]) - np.min(trimmeddata[x, y, z, :]) > 0.0:
                    thefit, R = tide_fit.mlregress(regressorvec, trimmeddata[x, y, z, :])
                    meandata[x, y, z] = thefit[0, 0]
                    Rdata[x, y, z] = R
                    for j in range(0, numregressors):
                        fitdata[x, y, z, j] = thefit[0, j + 1]
                        # datatoremove[x, y, z, :, j] = thefit[0, j + 1] * regressorvec[j]
                else:
                    meandata[x, y, z] = 0.0
                    Rdata[x, y, z] = 0.0
                    for j in range(0, numregressors):
                        fitdata[x, y, z, j] = 0.0
                        # datatoremove[x, y, z, :, j] = 0.0 * regressorvec[j]
                # totaltoremove[x, y, z, :] = np.sum(datatoremove[x, y, z, :, :], axis=1)
                # filtereddata[x, y, z, :] = trimmeddata[x, y, z, :] - totaltoremove[x, y, z, :]

    # first save the things with a small number of timepoints
    print("fitting complete: about to save the fit data")
    theheader = nim_header
    theheader["dim"][4] = 1
    tide_io.savetonifti(meandata, theheader, outputroot + "_mean")
    for j in range(0, numregressors):
        tide_io.savetonifti(fitdata[:, :, :, j], theheader, outputroot + "_fit" + str(j))
    tide_io.savetonifti(Rdata, theheader, outputroot + "_R")
    Rdata = None

    print()
    print("Now constructing the array of data to remove")
    # datatoremove = np.zeros((xsize, ysize, numslices, timepoints - numskip, numregressors), dtype='float')
    totaltoremove = np.zeros((xsize, ysize, numslices, timepoints - numskip), dtype="float")
    # filtereddata = 1.0 * totaltoremove
    for z in range(0, numslices):
        print("processing slice ", z)
        for y in range(0, ysize):
            for x in range(0, xsize):
                # rebuild the regressor vector for this voxel -- voxel-specific
                # (NIfTI) regressors differ from voxel to voxel
                regressorvec = []
                for j in range(0, numregressors):
                    if evisnifti[j]:
                        regressorvec.append(evdata[j][x, y, z, :])
                    else:
                        regressorvec.append(evdata[j])
                if np.max(trimmeddata[x, y, z, :]) - np.min(trimmeddata[x, y, z, :]) > 0.0:
                    for j in range(0, numregressors):
                        totaltoremove[x, y, z, :] += fitdata[x, y, z, j] * regressorvec[j]
                else:
                    totaltoremove[x, y, z, :] = 0.0
    print("Array construction done.  Saving files")

    # now save the things with full timecourses
    theheader = nim_header
    theheader["dim"][4] = timepoints - numskip
    tide_io.savetonifti(totaltoremove, theheader, outputroot + "_totaltoremove")
    filtereddata = trimmeddata - totaltoremove
    totaltoremove = None
    tide_io.savetonifti(trimmeddata, theheader, outputroot + "_trimmed")
    trimmeddata = None
    tide_io.savetonifti(filtereddata, theheader, outputroot + "_filtered")
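A hypothetical invocation follows; every argument is a placeholder.  evfilename is a list that may mix NIfTI, FSL .par, and plain-text regressor files, as the branches above show, and outputs are written as NIfTI files under outputroot.

# placeholders only -- substitute real files
glmfilt(
    "bold.nii.gz",                       # 4D input dataset
    10,                                  # numskip: initial timepoints to discard
    "sub01_glm",                         # outputroot for *_mean, *_fit<j>, *_R, *_filtered, ...
    ["motion.par", "globalsignal.txt"],  # one entry per regressor file
)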
Code Example #7
def refineregressor(
    fmridata,
    fmritr,
    shiftedtcs,
    weights,
    passnum,
    lagstrengths,
    lagtimes,
    lagsigma,
    lagmask,
    R2,
    theprefilter,
    optiondict,
    padtrs=60,
    bipolar=False,
    includemask=None,
    excludemask=None,
    debug=False,
    rt_floatset=np.float64,
    rt_floattype="float64",
):
    """

    Parameters
    ----------
    fmridata : 4D numpy float array
       fMRI data
    fmritr : float
        Data repetition rate, in seconds
    shiftedtcs : 4D numpy float array
        Time aligned voxel timecourses
    weights : 2D numpy float array
        Per-voxel weight timecourses (filled in by the time shifting step)
    passnum : int
        Number of the pass (for labelling output)
    lagstrengths : 3D numpy float array
        Maximum correlation coefficient in every voxel
    lagtimes : 3D numpy float array
        Time delay of maximum crosscorrelation in seconds
    lagsigma : 3D numpy float array
        Gaussian width of the crosscorrelation peak, in seconds.
    lagmask : 3D numpy float array
        Mask of voxels with successful correlation fits.
    R2 : 3D numpy float array
        Square of the maximum correlation coefficient in every voxel
    theprefilter : function
        The filter function to use
    optiondict : dict
        Dictionary of all internal rapidtide configuration variables.
    padtrs : int, optional
        Number of timepoints to pad onto each end
    bipolar : bool, optional
        If True, treat strongly anticorrelated voxels as valid and flip their sign.  Default is False
    includemask : 3D array
        Mask of voxels to include in refinement.  Default is None (all voxels).
    excludemask : 3D array
        Mask of voxels to exclude from refinement.  Default is None (no voxels).
    debug : bool
        Enable additional debugging output.  Default is False
    rt_floatset : function
        Function to coerce variable types
    rt_floattype : {'float32', 'float64'}
        Data type for internal variables

    Returns
    -------
    volumetotal : int
        Number of voxels processed
    outputdata : float array
        New regressor
    maskarray : 3D array
        Mask of voxels used for refinement
    locationfails, ampfails, lagfails, sigmafails : int
        Number of voxels rejected by the location, amplitude, lag, and sigma criteria, respectively
    """
    inputshape = np.shape(fmridata)
    if optiondict["ampthresh"] < 0.0:
        if bipolar:
            theampthresh = tide_stats.getfracval(np.fabs(lagstrengths),
                                                 -optiondict["ampthresh"],
                                                 nozero=True)
        else:
            theampthresh = tide_stats.getfracval(lagstrengths,
                                                 -optiondict["ampthresh"],
                                                 nozero=True)
        print(
            "setting ampthresh to the",
            -100.0 * optiondict["ampthresh"],
            "th percentile (",
            theampthresh,
            ")",
        )
    else:
        theampthresh = optiondict["ampthresh"]
    if bipolar:
        ampmask = np.where(
            np.fabs(lagstrengths) >= theampthresh, np.int16(1), np.int16(0))
    else:
        ampmask = np.where(lagstrengths >= theampthresh, np.int16(1),
                           np.int16(0))
    if optiondict["lagmaskside"] == "upper":
        delaymask = np.where(
            (lagtimes - optiondict["offsettime"]) > optiondict["lagminthresh"],
            np.int16(1),
            np.int16(0),
        ) * np.where(
            (lagtimes - optiondict["offsettime"]) < optiondict["lagmaxthresh"],
            np.int16(1),
            np.int16(0),
        )
    elif optiondict["lagmaskside"] == "lower":
        delaymask = np.where(
            (lagtimes - optiondict["offsettime"]) <
            -optiondict["lagminthresh"],
            np.int16(1),
            np.int16(0),
        ) * np.where(
            (lagtimes - optiondict["offsettime"]) >
            -optiondict["lagmaxthresh"],
            np.int16(1),
            np.int16(0),
        )
    else:
        abslag = abs(lagtimes) - optiondict["offsettime"]
        delaymask = np.where(abslag > optiondict["lagminthresh"], np.int16(1),
                             np.int16(0)) * np.where(
                                 abslag < optiondict["lagmaxthresh"],
                                 np.int16(1), np.int16(0))
    sigmamask = np.where(lagsigma < optiondict["sigmathresh"], np.int16(1),
                         np.int16(0))
    locationmask = lagmask + 0
    if includemask is not None:
        locationmask = locationmask * includemask
    if excludemask is not None:
        locationmask = locationmask * (1 - excludemask)
    locationmask = locationmask.astype(np.int16)
    print("location mask created")

    # first generate the refine mask
    locationfails = np.sum(1 - locationmask)
    ampfails = np.sum(1 - ampmask * locationmask)
    lagfails = np.sum(1 - delaymask * locationmask)
    sigmafails = np.sum(1 - sigmamask * locationmask)
    refinemask = locationmask * ampmask * delaymask * sigmamask
    if tide_stats.getmasksize(refinemask) == 0:
        print("ERROR: no voxels in the refine mask:")
        print(
            "\n\t",
            locationfails,
            " locationfails",
            "\n\t",
            ampfails,
            " ampfails",
            "\n\t",
            lagfails,
            " lagfails",
            "\n\t",
            sigmafails,
            " sigmafails",
        )
        if (includemask is None) and (excludemask is None):
            print("\nRelax ampthresh, delaythresh, or sigmathresh - exiting")
        else:
            print(
                "\nChange include/exclude masks or relax ampthresh, delaythresh, or sigmathresh - exiting"
            )
        return 0, None, None, locationfails, ampfails, lagfails, sigmafails

    if optiondict["cleanrefined"]:
        shiftmask = locationmask
    else:
        shiftmask = refinemask
    volumetotal = np.sum(shiftmask)
    reportstep = 1000

    # timeshift the valid voxels
    if optiondict["nprocs"] > 1:
        # define the consumer function here so it inherits most of the arguments
        def timeshift_consumer(inQ, outQ):
            while True:
                try:
                    # get a new message
                    val = inQ.get()

                    # this is the 'TERM' signal
                    if val is None:
                        break

                    # process and send the data
                    outQ.put(
                        _procOneVoxelTimeShift(
                            val,
                            fmridata[val, :],
                            lagstrengths[val],
                            R2[val],
                            lagtimes[val],
                            padtrs,
                            fmritr,
                            theprefilter,
                            optiondict["fmrifreq"],
                            refineprenorm=optiondict["refineprenorm"],
                            lagmaxthresh=optiondict["lagmaxthresh"],
                            refineweighting=optiondict["refineweighting"],
                            detrendorder=optiondict["detrendorder"],
                            offsettime=optiondict["offsettime"],
                            filterbeforePCA=optiondict["filterbeforePCA"],
                            psdfilter=optiondict["psdfilter"],
                            rt_floatset=rt_floatset,
                            rt_floattype=rt_floattype,
                        ))

                except Exception as e:
                    print("error!", e)
                    break

        data_out = tide_multiproc.run_multiproc(
            timeshift_consumer,
            inputshape,
            shiftmask,
            nprocs=optiondict["nprocs"],
            showprogressbar=True,
            chunksize=optiondict["mp_chunksize"],
        )

        # unpack the data
        psdlist = []
        for voxel in data_out:
            shiftedtcs[voxel[0], :] = voxel[1]
            weights[voxel[0], :] = voxel[2]
            if optiondict["psdfilter"]:
                psdlist.append(voxel[3])
        del data_out

    else:
        psdlist = []
        for vox in range(0, inputshape[0]):
            if (vox % reportstep == 0 or vox
                    == inputshape[0] - 1) and optiondict["showprogressbar"]:
                tide_util.progressbar(vox + 1,
                                      inputshape[0],
                                      label="Percent complete (timeshifting)")
            if shiftmask[vox] > 0.5:
                retvals = _procOneVoxelTimeShift(
                    vox,
                    fmridata[vox, :],
                    lagstrengths[vox],
                    R2[vox],
                    lagtimes[vox],
                    padtrs,
                    fmritr,
                    theprefilter,
                    optiondict["fmrifreq"],
                    refineprenorm=optiondict["refineprenorm"],
                    lagmaxthresh=optiondict["lagmaxthresh"],
                    refineweighting=optiondict["refineweighting"],
                    detrendorder=optiondict["detrendorder"],
                    offsettime=optiondict["offsettime"],
                    filterbeforePCA=optiondict["filterbeforePCA"],
                    psdfilter=optiondict["psdfilter"],
                    rt_floatset=rt_floatset,
                    rt_floattype=rt_floattype,
                )
                shiftedtcs[retvals[0], :] = retvals[1]
                weights[retvals[0], :] = retvals[2]
                if optiondict["psdfilter"]:
                    psdlist.append(retvals[3])
        print()

    if optiondict["psdfilter"]:
        print(len(psdlist))
        print(psdlist[0])
        print(np.shape(np.asarray(psdlist, dtype=rt_floattype)))
        averagepsd = np.mean(np.asarray(psdlist, dtype=rt_floattype), axis=0)
        stdpsd = np.std(np.asarray(psdlist, dtype=rt_floattype), axis=0)
        snr = np.nan_to_num(averagepsd / stdpsd)

    # now generate the refined timecourse(s)
    validlist = np.where(refinemask > 0)[0]
    refinevoxels = shiftedtcs[validlist, :]
    if bipolar:
        for thevoxel in range(len(validlist)):
            if lagstrengths[validlist][thevoxel] < 0.0:
                refinevoxels[thevoxel, :] *= -1.0
    refineweights = weights[validlist]
    weightsum = np.sum(refineweights, axis=0) / volumetotal
    averagedata = np.sum(refinevoxels, axis=0) / volumetotal
    if optiondict["cleanrefined"]:
        invalidlist = np.where((1 - ampmask) > 0)[0]
        discardvoxels = shiftedtcs[invalidlist]
        discardweights = weights[invalidlist]
        discardweightsum = np.sum(discardweights, axis=0) / volumetotal
        averagediscard = np.sum(discardvoxels, axis=0) / volumetotal
    if optiondict["dodispersioncalc"]:
        print("splitting regressors by time lag for phase delay estimation")
        laglist = np.arange(
            optiondict["dispersioncalc_lower"],
            optiondict["dispersioncalc_upper"],
            optiondict["dispersioncalc_step"],
        )
        dispersioncalcout = np.zeros((np.shape(laglist)[0], inputshape[1]),
                                     dtype=rt_floattype)
        fftlen = int(inputshape[1] // 2)
        fftlen -= fftlen % 2
        dispersioncalcspecmag = np.zeros((np.shape(laglist)[0], fftlen),
                                         dtype=rt_floattype)
        dispersioncalcspecphase = np.zeros((np.shape(laglist)[0], fftlen),
                                           dtype=rt_floattype)
        for lagnum in range(0, np.shape(laglist)[0]):
            lower = laglist[lagnum] - optiondict["dispersioncalc_step"] / 2.0
            upper = laglist[lagnum] + optiondict["dispersioncalc_step"] / 2.0
            inlagrange = np.where(
                locationmask * ampmask *
                np.where(lower < lagtimes, np.int16(1), np.int16(0)) *
                np.where(lagtimes < upper, np.int16(1), np.int16(0)))[0]
            print(
                "    summing",
                np.shape(inlagrange)[0],
                "regressors with lags from",
                lower,
                "to",
                upper,
            )
            if np.shape(inlagrange)[0] > 0:
                dispersioncalcout[lagnum, :] = tide_math.corrnormalize(
                    np.mean(shiftedtcs[inlagrange], axis=0),
                    detrendorder=optiondict["detrendorder"],
                    windowfunc=optiondict["windowfunc"],
                )
                (
                    freqs,
                    dispersioncalcspecmag[lagnum, :],
                    dispersioncalcspecphase[lagnum, :],
                ) = tide_math.polarfft(dispersioncalcout[lagnum, :],
                                       1.0 / fmritr)
            inlagrange = None
        tide_io.writenpvecs(
            dispersioncalcout,
            optiondict["outputname"] + "_dispersioncalcvecs_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            dispersioncalcspecmag,
            optiondict["outputname"] + "_dispersioncalcspecmag_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            dispersioncalcspecphase,
            optiondict["outputname"] + "_dispersioncalcspecphase_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            freqs,
            optiondict["outputname"] + "_dispersioncalcfreqs_pass" +
            str(passnum) + ".txt",
        )

    if optiondict["pcacomponents"] < 0.0:
        pcacomponents = "mle"
    elif optiondict["pcacomponents"] >= 1.0:
        pcacomponents = int(np.round(optiondict["pcacomponents"]))
    elif optiondict["pcacomponents"] == 0.0:
        print("0.0 is not an allowed value for pcacomponents")
        sys.exit()
    else:
        pcacomponents = optiondict["pcacomponents"]
    icacomponents = 1

    if optiondict["refinetype"] == "ica":
        print("performing ica refinement")
        thefit = FastICA(n_components=icacomponents).fit(
            refinevoxels)  # Reconstruct signals
        print("Using first of ", len(thefit.components_), " components")
        icadata = thefit.components_[0]
        filteredavg = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], averagedata),
            detrendorder=optiondict["detrendorder"],
        )
        filteredica = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], icadata),
            detrendorder=optiondict["detrendorder"],
        )
        thepxcorr = pearsonr(filteredavg, filteredica)[0]
        print("ica/avg correlation = ", thepxcorr)
        if thepxcorr > 0.0:
            outputdata = 1.0 * icadata
        else:
            outputdata = -1.0 * icadata
    elif optiondict["refinetype"] == "pca":
        # use the method of "A novel perspective to calibrate temporal delays in cerebrovascular reactivity
        # using hypercapnic and hyperoxic respiratory challenges". NeuroImage 187, 154-165 (2019).
        print("performing pca refinement with pcacomponents set to",
              pcacomponents)
        try:
            thefit = PCA(n_components=pcacomponents).fit(refinevoxels)
        except ValueError:
            if pcacomponents == "mle":
                print(
                    "mle estimation failed - falling back to pcacomponents=0.8"
                )
                thefit = PCA(n_components=0.8).fit(refinevoxels)
            else:
                print("unhandled math exception in PCA refinement - exiting")
                sys.exit()
        print(
            "Using ",
            len(thefit.components_),
            " component(s), accounting for ",
            "{:.2f}% of the variance".format(100.0 * np.cumsum(
                thefit.explained_variance_ratio_)[len(thefit.components_) -
                                                  1]),
        )
        reduceddata = thefit.inverse_transform(thefit.transform(refinevoxels))
        if debug:
            print("complex processing: reduceddata.shape =", reduceddata.shape)
        pcadata = np.mean(reduceddata, axis=0)
        filteredavg = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], averagedata),
            detrendorder=optiondict["detrendorder"],
        )
        filteredpca = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], pcadata),
            detrendorder=optiondict["detrendorder"],
        )
        thepxcorr = pearsonr(filteredavg, filteredpca)[0]
        print("pca/avg correlation = ", thepxcorr)
        if thepxcorr > 0.0:
            outputdata = 1.0 * pcadata
        else:
            outputdata = -1.0 * pcadata
    elif optiondict["refinetype"] == "weighted_average":
        print("performing weighted averaging refinement")
        outputdata = np.nan_to_num(averagedata / weightsum)
    else:
        print("performing unweighted averaging refinement")
        outputdata = averagedata

    if optiondict["cleanrefined"]:
        thefit, R = tide_fit.mlregress(averagediscard, averagedata)
        fitcoff = rt_floatset(thefit[0, 1])
        datatoremove = rt_floatset(fitcoff * averagediscard)
        outputdata -= datatoremove
    print()
    print(
        "Timeshift applied to " + str(int(volumetotal)) + " voxels, " +
        str(len(validlist)) + " used for refinement:",
        "\n\t",
        locationfails,
        " locationfails",
        "\n\t",
        ampfails,
        " ampfails",
        "\n\t",
        lagfails,
        " lagfails",
        "\n\t",
        sigmafails,
        " sigmafails",
    )

    if optiondict["psdfilter"]:
        outputdata = tide_filt.transferfuncfilt(outputdata, snr)

    # garbage collect
    collected = gc.collect()
    print("Garbage collector: collected %d objects." % collected)

    return volumetotal, outputdata, refinemask, locationfails, ampfails, lagfails, sigmafails
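refineregressor pulls its tuning parameters from optiondict; the sketch below collects the keys this listing actually reads.  The values are illustrative placeholders, not recommended settings.

# illustrative values only -- see the rapidtide documentation for real defaults
optiondict = {
    "ampthresh": 0.3,               # correlation threshold (negative means percentile)
    "lagmaskside": "both",          # 'upper', 'lower', or anything else for both sides
    "offsettime": 0.0,
    "lagminthresh": 0.5,
    "lagmaxthresh": 5.0,
    "sigmathresh": 100.0,
    "cleanrefined": False,
    "nprocs": 1,
    "mp_chunksize": 1000,
    "showprogressbar": True,
    "fmrifreq": 0.5,                # 1.0 / fmritr
    "refineprenorm": "mean",
    "refineweighting": "R2",
    "detrendorder": 1,
    "filterbeforePCA": True,
    "psdfilter": False,
    "dodispersioncalc": False,
    "dispersioncalc_lower": -5.0,
    "dispersioncalc_upper": 5.0,
    "dispersioncalc_step": 0.5,
    "pcacomponents": 0.8,
    "refinetype": "weighted_average",
    "windowfunc": "hamming",
    "outputname": "sub01",
}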
Code Example #8
def polyfitim(
    datafile,
    datamask,
    templatefile,
    templatemask,
    outputroot,
    regionatlas=None,
    order=1,
):

    # read in data
    print("reading in data arrays")
    (
        datafile_img,
        datafile_data,
        datafile_hdr,
        datafiledims,
        datafilesizes,
    ) = tide_io.readfromnifti(datafile)
    (
        datamask_img,
        datamask_data,
        datamask_hdr,
        datamaskdims,
        datamasksizes,
    ) = tide_io.readfromnifti(datamask)
    (
        templatefile_img,
        templatefile_data,
        templatefile_hdr,
        templatefiledims,
        templatefilesizes,
    ) = tide_io.readfromnifti(templatefile)
    (
        templatemask_img,
        templatemask_data,
        templatemask_hdr,
        templatemaskdims,
        templatemasksizes,
    ) = tide_io.readfromnifti(templatemask)

    if regionatlas is not None:
        (
            regionatlas_img,
            regionatlas_data,
            regionatlas_hdr,
            regionatlasdims,
            regionatlassizes,
        ) = tide_io.readfromnifti(regionatlas)

    xsize = datafiledims[1]
    ysize = datafiledims[2]
    numslices = datafiledims[3]
    timepoints = datafiledims[4]

    # check dimensions
    print("checking dimensions")
    if not tide_io.checkspacedimmatch(datafiledims, datamaskdims):
        print("input mask spatial dimensions do not match image")
        exit()
    if datamaskdims[4] == 1:
        print("using 3d data mask")
        datamask3d = True
    else:
        datamask3d = False
        if not tide_io.checktimematch(datafiledims, datamaskdims):
            print("input mask time dimension does not match image")
            exit()
    if not tide_io.checkspacedimmatch(datafiledims, templatefiledims):
        print(templatefiledims,
              "template file spatial dimensions do not match image")
        exit()
    if not templatefiledims[4] == 1:
        print("template file time dimension is not equal to 1")
        exit()
    if not tide_io.checkspacedimmatch(datafiledims, templatemaskdims):
        print("template mask spatial dimensions do not match image")
        exit()
    if not templatemaskdims[4] == 1:
        print("template mask time dimension is not equal to 1")
        exit()
    if regionatlas is not None:
        if not tide_io.checkspacedimmatch(datafiledims, regionatlasdims):
            print("template mask spatial dimensions do not match image")
            exit()
        if not regionatlasdims[4] == 1:
            print("regionatlas time dimension is not equal to 1")
            exit()

    # allocating arrays
    print("allocating arrays")
    numspatiallocs = int(xsize) * int(ysize) * int(numslices)
    rs_datafile = datafile_data.reshape((numspatiallocs, timepoints))
    if datamask3d:
        rs_datamask = datamask_data.reshape(numspatiallocs)
    else:
        rs_datamask = datamask_data.reshape((numspatiallocs, timepoints))
    rs_datamask_bin = np.where(rs_datamask > 0.9, 1.0, 0.0)
    rs_templatefile = templatefile_data.reshape(numspatiallocs)
    rs_templatemask = templatemask_data.reshape(numspatiallocs)
    rs_templatemask_bin = np.where(rs_templatemask > 0.1, 1.0, 0.0)
    if regionatlas is not None:
        rs_regionatlas = regionatlas_data.reshape(numspatiallocs)
        numregions = int(np.max(rs_regionatlas))

    fitdata = np.zeros((numspatiallocs, timepoints), dtype="float")
    # residuals = np.zeros((numspatiallocs, timepoints), dtype='float')
    # newtemplate = np.zeros((numspatiallocs), dtype='float')
    # newmask = np.zeros((numspatiallocs), dtype='float')
    if regionatlas is not None:
        lincoffs = np.zeros((numregions, timepoints), dtype="float")
        sqrcoffs = np.zeros((numregions, timepoints), dtype="float")
        offsets = np.zeros((numregions, timepoints), dtype="float")
        rvals = np.zeros((numregions, timepoints), dtype="float")
    else:
        lincoffs = np.zeros(timepoints, dtype="float")
        sqrcoffs = np.zeros(timepoints, dtype="float")
        offsets = np.zeros(timepoints, dtype="float")
        rvals = np.zeros(timepoints, dtype="float")

    if regionatlas is not None:
        print("making region masks")
        regionvoxels = np.zeros((numspatiallocs, numregions), dtype="float")
        for region in range(0, numregions):
            thisregion = np.where((rs_regionatlas *
                                   rs_templatemask_bin) == (region + 1))
            regionvoxels[thisregion, region] = 1.0

    # mask everything
    print("masking template")
    maskedtemplate = rs_templatefile * rs_templatemask_bin

    # cycle over all images
    print("now cycling over all images")
    for thetime in range(0, timepoints):
        print("fitting timepoint", thetime)

        # get the appropriate mask
        if datamask3d:
            # a 3D mask applies to every timepoint
            thisdatamask = rs_datamask_bin
        else:
            thisdatamask = rs_datamask_bin[:, thetime]
        if regionatlas is not None:
            for region in range(0, numregions):
                voxelstofit = np.where(
                    regionvoxels[:, region] * thisdatamask > 0.5)
                voxelstoreconstruct = np.where(regionvoxels[:, region] > 0.5)
                if order == 2:
                    thefit, R = tide_fit.mlregress(
                        [
                            rs_templatefile[voxelstofit],
                            np.square(rs_templatefile[voxelstofit]),
                        ],
                        rs_datafile[voxelstofit, thetime][0],
                    )
                else:
                    thefit, R = tide_fit.mlregress(
                        rs_templatefile[voxelstofit],
                        rs_datafile[voxelstofit, thetime][0],
                    )
                lincoffs[region, thetime] = thefit[0, 1]
                offsets[region, thetime] = thefit[0, 0]
                rvals[region, thetime] = R
                if order == 2:
                    sqrcoffs[region, thetime] = thefit[0, 2]
                    fitdata[voxelstoreconstruct, thetime] += (
                        sqrcoffs[region, thetime] *
                        np.square(rs_templatefile[voxelstoreconstruct]) +
                        lincoffs[region, thetime] *
                        rs_templatefile[voxelstoreconstruct] +
                        offsets[region, thetime])
                else:
                    fitdata[voxelstoreconstruct,
                            thetime] += (lincoffs[region, thetime] *
                                         rs_templatefile[voxelstoreconstruct] +
                                         offsets[region, thetime])
                # newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[region, thetime]) * rs_datamask
                # newmask += rs_datamask * rs_templatemask_bin
        else:
            voxelstofit = np.where(thisdatamask > 0.5)
            voxelstoreconstruct = np.where(rs_templatemask > 0.5)
            thefit, R = tide_fit.mlregress(
                rs_templatefile[voxelstofit], rs_datafile[voxelstofit,
                                                          thetime][0])
            lincoffs[thetime] = thefit[0, 1]
            offsets[thetime] = thefit[0, 0]
            rvals[thetime] = R
            fitdata[voxelstoreconstruct, thetime] = (
                lincoffs[thetime] * rs_templatefile[voxelstoreconstruct] +
                offsets[thetime])
            # if datamask3d:
            #    newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[thetime]) * rs_datamask
            # else:
            #    newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[thetime]) * rs_datamask[:, thetime]
            # newmask += rs_datamask[:, thetime] * rs_templatemask_bin
    residuals = rs_datafile - fitdata

    # write out the data files
    print("writing time series")
    if order == 2:
        tide_io.writenpvecs(sqrcoffs, outputroot + "_sqrcoffs.txt")
    tide_io.writenpvecs(lincoffs, outputroot + "_lincoffs.txt")
    tide_io.writenpvecs(offsets, outputroot + "_offsets.txt")
    tide_io.writenpvecs(rvals, outputroot + "_rvals.txt")
    if regionatlas is not None:
        # lincoffs and offsets are (numregions, timepoints), so the region
        # index belongs on the first axis
        for region in range(0, numregions):
            print(
                "region",
                region + 1,
                "slope mean, std:",
                np.mean(lincoffs[region, :]),
                np.std(lincoffs[region, :]),
            )
            print(
                "region",
                region + 1,
                "offset mean, std:",
                np.mean(offsets[region, :]),
                np.std(offsets[region, :]),
            )
    else:
        print("slope mean, std:", np.mean(lincoffs), np.std(lincoffs))
        print("offset mean, std:", np.mean(offsets), np.std(offsets))

    print("writing nifti series")
    tide_io.savetonifti(
        fitdata.reshape((xsize, ysize, numslices, timepoints)),
        datafile_hdr,
        outputroot + "_fit",
    )
    tide_io.savetonifti(
        residuals.reshape((xsize, ysize, numslices, timepoints)),
        datafile_hdr,
        outputroot + "_residuals",
    )
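A hypothetical call (all filenames are placeholders); passing order=2 also fits a quadratic term in the template, and regionatlas switches to per-region fits.

# placeholders only -- substitute real files
polyfitim(
    "data4d.nii.gz",          # datafile: 4D series to fit
    "datamask.nii.gz",        # datamask: 3D or 4D validity mask
    "template.nii.gz",        # templatefile: 3D spatial template
    "templatemask.nii.gz",    # templatemask: 3D template mask
    "sub01_polyfit",          # outputroot for *_fit, *_residuals, *_lincoffs.txt, ...
    regionatlas=None,
    order=1,
)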
Code Example #9
File: showxcorrx.py  Project: bbfrederick/rapidtide
def showxcorrx_workflow(infilename1, infilename2, Fs,
                        thelabel='', starttime=0., duration=1000000.,
                        searchrange=15.,
                        display=True, trimdata=False,
                        summarymode=False, labelline=False,
                        flipregressor=False, windowfunc='hamming',
                        calccepstraldelay=False, corroutputfile=False,
                        controlvariablefile=None, numreps=0,
                        arbvec=None, filtertype='arb', corrweighting='none',
                        detrendorder=1, prewindow=True, verbose=False):
    r"""Calculate and display crosscorrelation between two timeseries.

    Parameters
    ----------
    infilename1 : str
        The name of a text file containing a timeseries, one timepoint per line.
    infilename2 : str
        The name of a text file containing a timeseries, one timepoint per line.
    Fs : float
        The sample rate of the time series, in Hz.
    thelabel : str, optional
        The label for the output graph.  Default is blank.
    starttime : float, optional
        Time offset into the timeseries, in seconds, to start using the time data.  Default is 0
    duration : float, optional
        Length of time from each time series, in seconds, to use for the cross-correlation.  Default is the entire time series.
    searchrange : float, optional
        Only search for cross-correlation peaks between -searchrange and +searchrange seconds (default is 15).
    display : bool, optional
        Plot cross-correlation function in a matplotlib window.  Default is True.
    trimdata : bool, optional
        Trim time series to the length of the shorter series.  Default is False.
    summarymode : bool, optional
        Output a table of interesting results for later processing.  Default is False.
    labelline : bool, optional
        Print an explanatory header line over the summary information.  Default is False.
    flipregressor : bool, optional
        Invert timeseries 2 prior to cross-correlation.
    windowfunc : {'hamming', 'hann', 'blackmanharris'}
        Window function to apply prior to cross-correlation.  Default is 'hamming'.
    calccepstraldelay : bool, optional
        Use cepstral estimation of delay.  Default is False.
    corroutputfile : str, optional
        Name of a file in which to save the correlation function.  Default is False (do not save).
    controlvariablefile : str, optional
        Name of a text file of control variables to regress out of both timeseries prior to correlation (partial correlation).  Default is None.
    numreps : int, optional
        Number of null correlations to perform to estimate significance.  Default is 0 (do not estimate significance).
    arbvec : [float,float,float,float], optional
        Frequency limits of the arb_pass filter.
    filtertype : {'none', 'card', 'lfo', 'vlf', 'resp', 'arb'}, optional
        Type of filter to apply to the data prior to correlation.  Default is 'arb'
    corrweighting : {'none', 'Liang', 'Eckart', 'PHAT'}, optional
         Weighting function to apply to the crosscorrelation in the Fourier domain.  Default is 'none'
    detrendorder : int, optional
       Order of polynomial used to detrend crosscorrelation inputs.  Default is 1 (0 disables)
    prewindow : bool, optional
        Apply window function prior to cross-correlation.  Default is True.
    verbose : bool, optional
        Print internal status information.  Default is False.

    Notes
    -----
    This workflow writes out several files:

    If corroutputfile is defined:

    ======================    =================================================
    Filename                  Content
    ======================    =================================================
    corrlist.txt              Null correlation values from shuffled data
    corrlist_pear.txt         Null Pearson correlation values from shuffled data
    [corroutputfile]          Correlation function
    ======================    =================================================

    If debug is True:

    ======================    =================================================
    Filename                  Content
    ======================    =================================================
    filtereddata1.txt         The filtered, normalized version of timeseries 1
    filtereddata2.txt         The filtered, normalized version of timeseries 2
    ======================    =================================================
    """
    # Constants that could be arguments
    dofftcorr = True
    writecorrlists = False
    debug = False
    showpearson = True

    # These are unnecessary and should be simplified
    dopartial = bool(controlvariablefile)
    uselabel = bool(thelabel)
    dumpfiltered = bool(debug)

    if labelline:
        # TS: should prob reflect this in the parser, but it's not a big deal
        summarymode = True

    if numreps == 0:
        estimate_significance = False
    else:
        estimate_significance = True

    savecorrelation = bool(corroutputfile)

    theprefilter = tide_filt.noncausalfilter()

    if arbvec is not None and filtertype != 'arb':
        raise ValueError('Argument arbvec must be None if filtertype is '
                         'not arb')

    if arbvec is not None:
        if len(arbvec) == 2:
            arb_lower = float(arbvec[0])
            arb_upper = float(arbvec[1])
            arb_lowerstop = 0.9 * float(arbvec[0])
            arb_upperstop = 1.1 * float(arbvec[1])
        elif len(arbvec) == 4:
            arb_lower = float(arbvec[0])
            arb_upper = float(arbvec[1])
            arb_lowerstop = float(arbvec[2])
            arb_upperstop = float(arbvec[3])
        theprefilter.settype('arb')
        theprefilter.setarb(arb_lowerstop, arb_lower, arb_upper, arb_upperstop)
    else:
        theprefilter.settype(filtertype)

    inputdata1 = tide_io.readvec(infilename1)
    inputdata2 = tide_io.readvec(infilename2)
    numpoints = len(inputdata1)

    startpoint1 = max([int(starttime * Fs), 0])
    if debug:
        print('startpoint set to ', startpoint1)
    endpoint1 = min([startpoint1 + int(duration * Fs), int(len(inputdata1))])
    if debug:
        print('endpoint set to ', endpoint1)
    endpoint2 = min([int(duration * Fs), int(len(inputdata1)),
                     int(len(inputdata2))])
    trimdata1 = inputdata1[startpoint1:endpoint1]
    trimdata2 = inputdata2[0:endpoint2]

    if trimdata:
        minlen = np.min([len(trimdata1), len(trimdata2)])
        trimdata1 = trimdata1[0:minlen]
        trimdata2 = trimdata2[0:minlen]

    # band limit the regressor if that is needed
    if theprefilter.gettype() != 'none':
        if verbose:
            print("filtering to ", theprefilter.gettype(), " band")
    print(windowfunc)
    filtereddata1 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata1),
                                            prewindow=prewindow,
                                            detrendorder=detrendorder,
                                            windowfunc=windowfunc)
    filtereddata2 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata2),
                                            prewindow=prewindow,
                                            detrendorder=detrendorder,
                                            windowfunc=windowfunc)
    if flipregressor:
        filtereddata2 *= -1.0

    if dumpfiltered:
        tide_io.writenpvecs(filtereddata1, 'filtereddata1.txt')
        tide_io.writenpvecs(filtereddata2, 'filtereddata2.txt')

    if dopartial:
        controlvars = tide_io.readvecs(controlvariablefile)
        numregressors = len(controlvars)  # Added by TS. Not sure if works.
        regressorvec = []
        for j in range(0, numregressors):
            regressorvec.append(tide_math.corrnormalize(
                theprefilter.apply(Fs, controlvars[j, :]),
                prewindow=prewindow,
                detrendorder=detrendorder,
                windowfunc=windowfunc))

        if (np.max(filtereddata1) - np.min(filtereddata1)) > 0.0:
            thefit, filtereddata1 = tide_fit.mlregress(regressorvec,
                                                       filtereddata1)

        if (np.max(filtereddata2) - np.min(filtereddata2)) > 0.0:
            thefit, filtereddata2 = tide_fit.mlregress(regressorvec,
                                                       filtereddata2)

    thexcorr = tide_corr.fastcorrelate(filtereddata1, filtereddata2,
                                       usefft=dofftcorr,
                                       weighting=corrweighting,
                                       displayplots=debug)

    if calccepstraldelay:
        cepdelay = tide_corr.cepstraldelay(filtereddata1, filtereddata2,
                                           1.0 / Fs, displayplots=display)
        cepcoff = tide_corr.delayedcorr(filtereddata1, filtereddata2, cepdelay,
                                        1.0 / Fs)
        print('cepstral delay time is {0}, correlation is {1}'.format(cepdelay,
                                                                      cepcoff))
    thepxcorr = pearsonr(filtereddata1, filtereddata2)

    # calculate the coherence
    f, Cxy = sp.signal.coherence(
        tide_math.corrnormalize(theprefilter.apply(Fs, trimdata1), prewindow=prewindow,
                                detrendorder=detrendorder, windowfunc=windowfunc),
        tide_math.corrnormalize(theprefilter.apply(Fs, trimdata2), prewindow=prewindow,
                                detrendorder=detrendorder, windowfunc=windowfunc),
        Fs)

    # calculate the cross spectral density
    f, Pxy = sp.signal.csd(
        tide_math.corrnormalize(theprefilter.apply(Fs, trimdata1), prewindow=prewindow,
                                detrendorder=detrendorder, windowfunc=windowfunc),
        tide_math.corrnormalize(theprefilter.apply(Fs, trimdata2), prewindow=prewindow,
                                detrendorder=detrendorder, windowfunc=windowfunc),
        Fs)

    xcorrlen = len(thexcorr)
    sampletime = 1.0 / Fs
    xcorr_x = r_[0:xcorrlen] * sampletime - (xcorrlen * sampletime) / 2.0\
        + sampletime / 2.0
    halfwindow = int(searchrange * Fs)
    corrzero = xcorrlen // 2
    searchstart = corrzero - halfwindow
    searchend = corrzero + halfwindow
    xcorr_x_trim = xcorr_x[searchstart:searchend + 1]
    thexcorr_trim = thexcorr[searchstart:searchend + 1]
    if debug:
        print('searching for peak correlation over range ', searchstart,
              searchend)

    maxdelay = xcorr_x_trim[argmax(thexcorr_trim)]
    if debug:
        print('maxdelay before refinement', maxdelay)

    dofindmaxlag = True
    if dofindmaxlag:
        print('executing findmaxlag')
        (maxindex, maxdelay, maxval, maxsigma, maskval, failreason, peakstart,
         peakend) = tide_fit.findmaxlag_gauss(
             xcorr_x_trim, thexcorr_trim, -searchrange, searchrange, 1000.0,
             refine=True,
             useguess=False,
             fastgauss=False,
             displayplots=False)
        print(maxindex, maxdelay, maxval, maxsigma, maskval, failreason)
        R = maxval
        if debug:
            print('maxdelay after refinement', maxdelay)
            if failreason > 0:
                print('failreason =', failreason)
    else:
        R = thexcorr_trim[argmax(thexcorr_trim)]

    # set the significance threshold
    if estimate_significance:
        # generate a list of correlations from shuffled data
        (corrlist,
         corrlist_pear) = _get_null_distribution(trimdata1, xcorr_x,
                                                 theprefilter, prewindow,
                                                 detrendorder, searchstart,
                                                 searchend, Fs, dofftcorr,
                                                 corrweighting=corrweighting,
                                                 numreps=numreps,
                                                 windowfunc=windowfunc)

        # calculate percentiles for the crosscorrelation from the distribution
        histlen = 100
        thepercentiles = [0.95, 0.99, 0.995]

        (pcts, pcts_fit,
         histfit) = tide_stats.sigFromDistributionData(corrlist, histlen,
                                                       thepercentiles)
        if debug:
            tide_stats.printthresholds(pcts, thepercentiles,
                                       ('Crosscorrelation significance '
                                        'thresholds from data:'))
            tide_stats.printthresholds(pcts_fit, thepercentiles,
                                       ('Crosscorrelation significance '
                                        'thresholds from fit:'))

        # calculate significance for the pearson correlation
        (pearpcts, pearpcts_fit,
         histfit) = tide_stats.sigFromDistributionData(corrlist_pear, histlen,
                                                       thepercentiles)
        if debug:
            tide_stats.printthresholds(pearpcts, thepercentiles,
                                       ('Pearson correlation significance '
                                        'thresholds from data:'))
            tide_stats.printthresholds(pearpcts_fit, thepercentiles,
                                       ('Pearson correlation significance '
                                        'thresholds from fit:'))

        if writecorrlists:
            tide_io.writenpvecs(corrlist, 'corrlist.txt')
            tide_io.writenpvecs(corrlist_pear, 'corrlist_pear.txt')

    def printthresholds(pcts, thepercentiles, labeltext):
        print(labeltext)
        for i in range(0, len(pcts)):
            print('\tp <', "{:.3f}".format(1.0 - thepercentiles[i]), ': ',
                  pcts[i])

    # report the pearson correlation
    if showpearson and verbose:
        print('Pearson_R:\t', thepxcorr[0])
        if estimate_significance:
            for idx, percentile in enumerate(thepercentiles):
                print('    pear_p(', "{:.3f}".format(1.0 - percentile), '):\t',
                      pearpcts[idx])
        print("")

    if debug:
        print(thepxcorr)

    if verbose:
        if uselabel:
            print(thelabel, ":\t", maxdelay)
        else:
            print("Crosscorrelation_Rmax:\t", R)
            print("Crosscorrelation_maxdelay:\t", maxdelay)
            if estimate_significance:
                for idx, percentile in enumerate(thepercentiles):
                    print('    xc_p(', "{:.3f}".format(1.0 - percentile),
                          '):\t', pcts[idx])
            print(infilename1, "[0 seconds] == ", infilename2, "[",
                  -1 * maxdelay, " seconds]")

    if summarymode:
        if estimate_significance:
            if uselabel:
                if labelline:
                    print('thelabel', 'pearson_R', 'pearson_R(p=0.05)',
                          'xcorr_R', 'xcorr_R(P=0.05)', 'xcorr_maxdelay')
                print(thelabel, thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0],
                      -1 * maxdelay)
            else:
                if labelline:
                    print('pearson_R', 'pearson_R(p=0.05)', 'xcorr_R',
                          'xcorr_R(P=0.05)', 'xcorr_maxdelay')
                print(thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0],
                      -1 * maxdelay)
        else:
            if uselabel:
                if labelline:
                    print('thelabel', 'pearson_r', 'pearson_p', 'xcorr_R',
                          'xcorr_maxdelay')
                print(thelabel, thepxcorr[0], thepxcorr[1], R, -1 * maxdelay)
            else:
                if labelline:
                    print('pearson_r\tpearson_p\txcorr_R\txcorr_t\t'
                          'xcorr_maxdelay')
                print(thepxcorr[0], '\t', thepxcorr[1], '\t', R, '\t',
                      -1 * maxdelay)

    if savecorrelation:
        tide_io.writenpvecs(np.stack((xcorr_x, thexcorr), axis=0),
                            corroutputfile)

    if display:
        fig, ax = plt.subplots()
        # ax.set_title('GCC')
        ax.plot(xcorr_x, thexcorr, 'k')
        if debug:
            fig, ax = plt.subplots()
            ax.plot(f, Cxy)
            fig, ax = plt.subplots()
            ax.plot(f, np.sqrt(np.abs(Pxy)) / np.max(np.sqrt(np.abs(Pxy))))
            ax.plot(f, np.angle(Pxy) / (2.0 * pi * f))
        fig.show()
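A hypothetical invocation (file names are placeholders); Fs is the shared sample rate of both timeseries in Hz, and a nonzero numreps turns on the null-distribution significance estimate.

# placeholders only -- substitute real timecourse files
showxcorrx_workflow(
    "timecourse1.txt",
    "timecourse2.txt",
    Fs=1.0,
    searchrange=15.0,
    filtertype="lfo",
    numreps=1000,
    display=False,
    summarymode=True,
)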