Example 1
def test_stcorrelate(debug=False):
    tr = 0.72
    testlen = 800
    shiftdist = 5
    windowtime = 30.0
    stepsize = 5.0
    corrweighting = "None"
    outfilename = op.join(get_test_temp_path(), "stcorrtest")

    # create outputdir if it doesn't exist
    create_dir(get_test_temp_path())

    dodetrend = True
    timeaxis = np.arange(0.0, 1.0 * testlen) * tr

    testfilter = NoncausalFilter(filtertype="lfo")
    sig1 = testfilter.apply(1.0 / tr, np.random.random(testlen))
    sig2 = np.float64(np.roll(sig1, int(shiftdist)))

    if debug:
        plt.figure()
        plt.plot(sig1)
        plt.plot(sig2)
        plt.legend(["Original", "Shifted"])
        plt.show()

    times, corrpertime, ppertime = shorttermcorr_1D(
        sig1,
        sig2,
        tr,
        windowtime,
        samplestep=int(stepsize // tr),
        detrendorder=0,
    )
    plength = len(times)
    times, xcorrpertime, Rvals, delayvals, valid = shorttermcorr_2D(
        sig1,
        sig2,
        tr,
        windowtime,
        samplestep=int(stepsize // tr),
        weighting=corrweighting,
        detrendorder=0,
        display=False,
    )
    xlength = len(times)
    writenpvecs(corrpertime, outfilename + "_pearson.txt")
    writenpvecs(ppertime, outfilename + "_pvalue.txt")
    writenpvecs(Rvals, outfilename + "_Rvalue.txt")
    writenpvecs(delayvals, outfilename + "_delay.txt")
    writenpvecs(valid, outfilename + "_mask.txt")
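
For reference, the sliding-window correlation that shorttermcorr_1D exercises can be reproduced with NumPy and SciPy alone. The sketch below is illustrative, not the rapidtide implementation; sliding_pearson and its window/step handling are assumptions that mirror the parameters used in the test.

# Minimal sketch of a sliding-window Pearson correlation (illustrative,
# not the rapidtide implementation).
import numpy as np
from scipy.stats import pearsonr

def sliding_pearson(sig1, sig2, tr, windowtime, steptime):
    """Correlate sig1 and sig2 within a sliding window.

    Returns window-center times, r values, and p values."""
    windowpts = int(windowtime // tr)
    steppts = max(int(steptime // tr), 1)
    times, rvals, pvals = [], [], []
    for start in range(0, len(sig1) - windowpts, steppts):
        r, p = pearsonr(sig1[start:start + windowpts],
                        sig2[start:start + windowpts])
        times.append((start + windowpts / 2.0) * tr)
        rvals.append(r)
        pvals.append(p)
    return np.array(times), np.array(rvals), np.array(pvals)

# usage on the same kind of data as the test above
tr, testlen, shiftdist = 0.72, 800, 5
sig1 = np.random.random(testlen)
sig2 = np.roll(sig1, shiftdist)
times, rvals, pvals = sliding_pearson(sig1, sig2, tr, 30.0, 5.0)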
Example 2
def test_stcorrelate(debug=False):
    tr = 0.72
    testlen = 800
    shiftdist = 5
    windowtime = 30.0
    stepsize = 5.0
    corrweighting = 'none'
    outfilename = op.join(get_test_data_path(), 'stcorrtest')
    prewindow = True
    dodetrend = True
    timeaxis = np.arange(0.0, 1.0 * testlen) * tr

    testfilter = noncausalfilter(filtertype='lfo')
    sig1 = testfilter.apply(1.0/tr, np.random.random(testlen))
    sig2 = np.float64(np.roll(sig1, int(shiftdist)))

    if debug:
        plt.figure()
        plt.plot(sig1)
        plt.plot(sig2)
        plt.legend(['Original', 'Shifted'])
        plt.show()

    times, corrpertime, ppertime = shorttermcorr_1D(sig1, sig2, tr, windowtime,
                                                    samplestep=int(stepsize // tr),
                                                    prewindow=prewindow,
                                                    detrendorder=0)
    plength = len(times)
    times, xcorrpertime, Rvals, delayvals, valid = shorttermcorr_2D(sig1, sig2, tr, windowtime,
                                                                    samplestep=int(stepsize // tr),
                                                                    weighting=corrweighting,
                                                                    prewindow=prewindow, detrendorder=0,
                                                                    display=False)
    xlength = len(times)
    writenpvecs(corrpertime, outfilename + "_pearson.txt")
    writenpvecs(ppertime, outfilename + "_pvalue.txt")
    writenpvecs(Rvals, outfilename + "_Rvalue.txt")
    writenpvecs(delayvals, outfilename + "_delay.txt")
    writenpvecs(valid, outfilename + "_mask.txt")
Example 3
def makeandsavehistogram(indata, histlen, endtrim, outname,
                         binsize=None,
                         displaytitle='histogram',
                         displayplots=False,
                         refine=False,
                         therange=None):
    """

    Parameters
    ----------
    indata
    histlen
    endtrim
    outname
    displaytitle
    displayplots
    refine
    therange

    Returns
    -------

    """
    thehist = makehistogram(indata, histlen, binsize=binsize, therange=therange)
    thestore = np.zeros((2, len(thehist[0])), dtype='float64')
    thestore[0, :] = (thehist[1][1:] + thehist[1][0:-1]) / 2.0
    thestore[1, :] = thehist[0][-histlen:]
    # get starting values for the peak, ignoring first and last point of histogram
    peakindex = np.argmax(thestore[1, 1:-1])
    peaklag = thestore[0, peakindex + 1]
    peakheight = thestore[1, peakindex + 1]
    numbins = 1
    while (peakindex + numbins < histlen - 1) and (thestore[1, peakindex + numbins] > peakheight / 2.0):
        numbins += 1
    peakwidth = (thestore[0, peakindex + numbins] - thestore[0, peakindex]) * 2.0
    if refine:
        peakheight, peaklag, peakwidth = tide_fit.gaussfit(peakheight, peaklag, peakwidth, thestore[0, :], thestore[1, :])
    centerofmass = np.sum(thestore[0, :] * thestore[1, :]) / np.sum(thestore[1, :])
    tide_io.writenpvecs(np.array([centerofmass]), outname + '_centerofmass.txt')
    tide_io.writenpvecs(np.array([peaklag]), outname + '_peak.txt')
    tide_io.writenpvecs(thestore, outname + '.txt')
    if displayplots:
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title(displaytitle)
        pl.plot(thestore[0, :(-1 - endtrim)], thestore[1, :(-1 - endtrim)])
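
The peak parameters extracted above (location, height, and a width taken from the half-maximum crossing) can be computed standalone. Below is a minimal NumPy sketch of the same logic; histpeak is an illustrative name, not part of rapidtide.

import numpy as np

def histpeak(indata, histlen, therange=None):
    # build the histogram and convert bin edges to centers
    counts, edges = np.histogram(indata, bins=histlen, range=therange)
    centers = (edges[1:] + edges[:-1]) / 2.0
    # peak location and height, ignoring the first and last bins
    peakindex = np.argmax(counts[1:-1]) + 1
    peakheight = counts[peakindex]
    peaklag = centers[peakindex]
    # walk right until the counts fall below half maximum
    numbins = 1
    while (peakindex + numbins < histlen - 1
           and counts[peakindex + numbins] > peakheight / 2.0):
        numbins += 1
    peakwidth = (centers[peakindex + numbins] - centers[peakindex]) * 2.0
    return peaklag, peakheight, peakwidth

peaklag, peakheight, peakwidth = histpeak(np.random.normal(size=10000), 101)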
Example 4
def niftidecomp_workflow(
    decompaxis,
    datafile,
    outputroot,
    datamaskname=None,
    decomptype="pca",
    pcacomponents=0.5,
    icacomponents=None,
    varnorm=True,
    demean=True,
    sigma=0.0,
):

    print(f"Will perform {decomptype} analysis along the {decompaxis} axis")

    if decompaxis == "temporal":
        decompaxisnum = 1
        transposeifspatial = lambda x: x  # identity - no transpose needed for temporal decomposition
    else:
        decompaxisnum = 0
        transposeifspatial = np.transpose

    # save the command line
    tide_io.writevec([" ".join(sys.argv)], outputroot + "_commandline.txt")

    # read in data
    print("reading in data arrays")
    (
        datafile_img,
        datafile_data,
        datafile_hdr,
        datafiledims,
        datafilesizes,
    ) = tide_io.readfromnifti(datafile)

    if datamaskname is not None:
        (
            datamask_img,
            datamask_data,
            datamask_hdr,
            datamaskdims,
            datamasksizes,
        ) = tide_io.readfromnifti(datamaskname)

    xsize, ysize, numslices, timepoints = tide_io.parseniftidims(datafiledims)
    xdim, ydim, slicethickness, tr = tide_io.parseniftisizes(datafilesizes)

    # check dimensions
    if datamaskname is not None:
        print("checking mask dimensions")
        if not tide_io.checkspacedimmatch(datafiledims, datamaskdims):
            print("input mask spatial dimensions do not match image")
            exit()
        if not (tide_io.checktimematch(datafiledims, datamaskdims)
                or datamaskdims[4] == 1):
            print("input mask time dimension does not match image")
            exit()

    # smooth the data
    if sigma > 0.0:
        print("smoothing data")
        for i in range(timepoints):
            datafile_data[:, :, :, i] = tide_filt.ssmooth(
                xdim, ydim, slicethickness, sigma, datafile_data[:, :, :, i]
            )

    # allocating arrays
    print("reshaping arrays")
    numspatiallocs = int(xsize) * int(ysize) * int(numslices)
    rs_datafile = datafile_data.reshape((numspatiallocs, timepoints))

    print("masking arrays")
    if datamaskname is not None:
        if datamaskdims[4] == 1:
            proclocs = np.where(datamask_data.reshape(numspatiallocs) > 0.5)
        else:
            proclocs = np.where(
                np.mean(datamask_data.reshape((numspatiallocs, timepoints)),
                        axis=1) > 0.5)
            rs_mask = datamask_data.reshape(
                (numspatiallocs, timepoints))[proclocs, :]
            rs_mask = np.where(rs_mask > 0.5, 1.0, 0.0)[0]
    else:
        datamaskdims = [1, xsize, ysize, numslices, 1]
        themaxes = np.max(rs_datafile, axis=1)
        themins = np.min(rs_datafile, axis=1)
        thediffs = (themaxes - themins).reshape(numspatiallocs)
        proclocs = np.where(thediffs > 0.0)
    procdata = rs_datafile[proclocs, :][0]
    print(rs_datafile.shape, procdata.shape)

    # normalize the individual images
    if demean:
        print("demeaning array")
        themean = np.mean(procdata, axis=decompaxisnum)
        print("shape of mean", themean.shape)
        for i in range(procdata.shape[1 - decompaxisnum]):
            if decompaxisnum == 1:
                procdata[i, :] -= themean[i]
            else:
                procdata[:, i] -= themean[i]
    else:
        themean = np.ones(procdata.shape[1 - decompaxisnum])

    if varnorm:
        print("variance normalizing array")
        thevar = np.var(procdata, axis=decompaxisnum)
        print("shape of var", thevar.shape)
        for i in range(procdata.shape[1 - decompaxisnum]):
            if decompaxisnum == 1:
                procdata[i, :] /= thevar[i]
            else:
                procdata[:, i] /= thevar[i]
        procdata = np.nan_to_num(procdata)
    else:
        thevar = np.ones(procdata.shape[1 - decompaxisnum])

    # applying mask
    if datamaskdims[4] > 1:
        procdata *= rs_mask

    # now perform the decomposition
    if decomptype == "ica":
        print("performing ica decomposition")
        if icacomponents is None:
            print("will return all significant components")
        else:
            print("will return", icacomponents, "components")
        thefit = FastICA(n_components=icacomponents).fit(
            transposeifspatial(procdata))  # Reconstruct signals
        if icacomponents is None:
            thecomponents = transposeifspatial(thefit.components_[:])
            print(thecomponents.shape[1], "components found")
        else:
            thecomponents = transposeifspatial(
                thefit.components_[0:icacomponents])
            print("returning first", thecomponents.shape[1],
                  "components found")
    else:
        print("performing pca decomposition")
        if pcacomponents < 1.0:
            print(
                "will return the components accounting for",
                pcacomponents * 100.0,
                "% of the variance",
            )
        else:
            print("will return", pcacomponents, "components")
        if decomptype == "pca":
            thepca = PCA(n_components=pcacomponents)
        else:
            thepca = SparsePCA(n_components=pcacomponents)
        thefit = thepca.fit(transposeifspatial(procdata))
        thetransform = thepca.transform(transposeifspatial(procdata))
        theinvtrans = transposeifspatial(
            thepca.inverse_transform(thetransform))
        if pcacomponents < 1.0:
            thecomponents = transposeifspatial(thefit.components_[:])
            print("returning", thecomponents.shape[1], "components")
        else:
            thecomponents = transposeifspatial(
                thefit.components_[0:pcacomponents])

        # save the eigenvalues
        print("variance explained by component:",
              100.0 * thefit.explained_variance_ratio_)
        tide_io.writenpvecs(
            100.0 * thefit.explained_variance_ratio_,
            outputroot + "_explained_variance_pct.txt",
        )

        if decompaxis == "temporal":
            # save the components
            print("writing component timecourses")
            tide_io.writenpvecs(thecomponents, outputroot + "_components.txt")

            # save the singular values (available for plain PCA, not SparsePCA)
            if hasattr(thefit, "singular_values_"):
                print("writing singular values")
                tide_io.writenpvecs(np.transpose(thefit.singular_values_),
                                    outputroot + "_singvals.txt")

            # save the coefficients
            print("writing out the coefficients")
            coefficients = thetransform
            print("coefficients shape:", coefficients.shape)
            theheader = datafile_hdr
            theheader["dim"][4] = coefficients.shape[1]
            tempout = np.zeros((numspatiallocs, coefficients.shape[1]),
                               dtype="float")
            tempout[proclocs, :] = coefficients[:, :]
            tide_io.savetonifti(
                tempout.reshape(
                    (xsize, ysize, numslices, coefficients.shape[1])),
                theheader,
                outputroot + "_coefficients",
            )
            # unnormalize the dimensionality reduced data
            for i in range(numspatiallocs):
                theinvtrans[i, :] = thevar[i] * theinvtrans[i, :] + themean[i]

        else:
            # save the component images
            print("writing component images")
            theheader = datafile_hdr
            theheader["dim"][4] = thecomponents.shape[1]
            tempout = np.zeros((numspatiallocs, thecomponents.shape[1]),
                               dtype="float")
            tempout[proclocs, :] = thecomponents[:, :]
            tide_io.savetonifti(
                tempout.reshape(
                    (xsize, ysize, numslices, thecomponents.shape[1])),
                theheader,
                outputroot + "_components",
            )

            # save the coefficients
            print("writing out the coefficients")
            coefficients = np.transpose(thetransform)
            tide_io.writenpvecs(coefficients, outputroot + "_coefficients.txt")

            # unnormalize the dimensionality reduced data
            for i in range(timepoints):
                theinvtrans[:, i] = thevar[i] * theinvtrans[:, i] + themean[i]

        print("writing fit data")
        theheader = datafile_hdr
        theheader["dim"][4] = theinvtrans.shape[1]
        tempout = np.zeros((numspatiallocs, theinvtrans.shape[1]),
                           dtype="float")
        tempout[proclocs, :] = theinvtrans[:, :]
        tide_io.savetonifti(
            tempout.reshape((xsize, ysize, numslices, theinvtrans.shape[1])),
            theheader,
            outputroot + "_fit",
        )
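
The transposeifspatial indirection above comes down to one fact: scikit-learn decompositions treat rows as samples, so a spatial decomposition is just a temporal one applied to the transposed matrix. A minimal sketch on synthetic data (the shapes are illustrative):

# Sketch of the axis convention: rows are samples for sklearn, so "spatial"
# decomposition is a transpose of the (voxels x timepoints) matrix.
import numpy as np
from sklearn.decomposition import PCA

numvoxels, timepoints = 500, 120
data = np.random.randn(numvoxels, timepoints)

# temporal decomposition: components are timecourses
temporal_fit = PCA(n_components=0.5).fit(data)      # rows = voxels
# spatial decomposition: components are voxel maps
spatial_fit = PCA(n_components=0.5).fit(data.T)     # rows = timepoints

print(temporal_fit.components_.shape)   # (ncomponents, timepoints)
print(spatial_fit.components_.shape)    # (ncomponents, numvoxels)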
Example 5
def test_nullsimfunc(debug=False, display=False):
    # make the lfo filter
    lfofilter = tide_filt.NoncausalFilter(filtertype="lfo")

    # make the starting regressor
    timestep = 1.5
    Fs = 1.0 / timestep
    # sourcelen = 1200
    # sourcedata = lfofilter.apply(Fs, np.random.rand(sourcelen))
    sourcedata = tide_io.readvecs(
        os.path.join(get_test_data_path(), "fmri_globalmean.txt"))[0]
    sourcelen = len(sourcedata)
    numpasses = 1

    if display:
        plt.figure()
        plt.plot(sourcedata)
        plt.show()

    thexcorr = tide_corr.fastcorrelate(sourcedata, sourcedata)
    xcorrlen = len(thexcorr)
    xcorr_x = (
        np.linspace(0.0, xcorrlen, xcorrlen, endpoint=False) * timestep -
        (xcorrlen * timestep) / 2.0 + timestep / 2.0)

    if display:
        plt.figure()
        plt.plot(xcorr_x, thexcorr)
        plt.show()

    corrzero = xcorrlen // 2
    lagmin = -10
    lagmax = 10
    lagmininpts = int((-lagmin / timestep) - 0.5)
    lagmaxinpts = int((lagmax / timestep) + 0.5)

    searchstart = int(np.round(corrzero + lagmin / timestep))
    searchend = int(np.round(corrzero + lagmax / timestep))

    optiondict = {
        "numestreps": 10000,
        "showprogressbar": debug,
        "detrendorder": 3,
        "windowfunc": "hamming",
        "corrweighting": "None",
        "nprocs": 1,
        "widthlimit": 1000.0,
        "bipolar": False,
        "fixdelay": False,
        "peakfittype": "gauss",
        "lagmin": lagmin,
        "lagmax": lagmax,
        "absminsigma": 0.25,
        "absmaxsigma": 25.0,
        "edgebufferfrac": 0.0,
        "lthreshval": 0.0,
        "uthreshval": 1.0,
        "debug": False,
        "enforcethresh": True,
        "lagmod": 1000.0,
        "searchfrac": 0.5,
        "permutationmethod": "shuffle",
        "hardlimit": True,
    }
    theprefilter = tide_filt.NoncausalFilter("lfo")
    theCorrelator = tide_classes.Correlator(
        Fs=Fs,
        ncprefilter=theprefilter,
        detrendorder=optiondict["detrendorder"],
        windowfunc=optiondict["windowfunc"],
        corrweighting=optiondict["corrweighting"],
    )

    thefitter = tide_classes.SimilarityFunctionFitter(
        lagmod=optiondict["lagmod"],
        lthreshval=optiondict["lthreshval"],
        uthreshval=optiondict["uthreshval"],
        bipolar=optiondict["bipolar"],
        lagmin=optiondict["lagmin"],
        lagmax=optiondict["lagmax"],
        absmaxsigma=optiondict["absmaxsigma"],
        absminsigma=optiondict["absminsigma"],
        debug=optiondict["debug"],
        peakfittype=optiondict["peakfittype"],
        searchfrac=optiondict["searchfrac"],
        enforcethresh=optiondict["enforcethresh"],
        hardlimit=optiondict["hardlimit"],
    )

    if debug:
        print(optiondict)

    theCorrelator.setlimits(lagmininpts, lagmaxinpts)
    theCorrelator.setreftc(sourcedata)
    dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
    thefitter.setcorrtimeaxis(trimmedcorrscale)
    histograms = []
    for thenprocs in [1, -1]:
        for i in range(numpasses):
            corrlist = tide_nullsimfunc.getNullDistributionDatax(
                sourcedata,
                Fs,
                theCorrelator,
                thefitter,
                despeckle_thresh=5.0,
                fixdelay=False,
                fixeddelayvalue=0.0,
                numestreps=optiondict["numestreps"],
                nprocs=thenprocs,
                showprogressbar=optiondict["showprogressbar"],
                chunksize=1000,
                permutationmethod=optiondict["permutationmethod"],
            )
            tide_io.writenpvecs(
                corrlist, os.path.join(get_test_temp_path(),
                                       "corrdistdata.txt"))

            # calculate percentiles for the crosscorrelation from the distribution data
            histlen = 250
            thepercentiles = [0.95, 0.99, 0.995]

            pcts, pcts_fit, histfit = tide_stats.sigFromDistributionData(
                corrlist, histlen, thepercentiles)
            if debug:
                tide_stats.printthresholds(
                    pcts,
                    thepercentiles,
                    "Crosscorrelation significance thresholds from data:",
                )
                tide_stats.printthresholds(
                    pcts_fit,
                    thepercentiles,
                    "Crosscorrelation significance thresholds from fit:",
                )

            (
                thehist,
                peakheight,
                peakloc,
                peakwidth,
                centerofmass,
            ) = tide_stats.makehistogram(np.abs(corrlist),
                                         histlen,
                                         therange=[0.0, 1.0])
            histograms.append(thehist)
            thestore = np.zeros((2, len(thehist[0])), dtype="float64")
            thestore[0, :] = (thehist[1][1:] + thehist[1][0:-1]) / 2.0
            thestore[1, :] = thehist[0][-histlen:]
            if display:
                plt.figure()
                plt.plot(thestore[0, :], thestore[1, :])
                plt.show()

            # tide_stats.makeandsavehistogram(corrlist, histlen, 0,
            # os.path.join(get_test_temp_path(), 'correlationhist'),
            # displaytitle='Null correlation histogram',
            # displayplots=display, refine=False)
            assert True
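
Conceptually, getNullDistributionDatax builds its null distribution by correlating the regressor against permuted copies of itself and recording the peak correlation from each; significance thresholds are then just percentiles of those peaks. A simplified sketch, assuming plain shuffling with no detrending or windowing:

import numpy as np

def nulldist_maxcorr(data, numreps=1000, rng=None):
    # normalize once so the full cross-correlation yields correlation coefficients
    rng = np.random.default_rng() if rng is None else rng
    ref = (data - data.mean()) / (data.std() * len(data))
    peaks = np.empty(numreps)
    for i in range(numreps):
        shuffled = rng.permutation(data)
        shuffled = (shuffled - shuffled.mean()) / shuffled.std()
        peaks[i] = np.max(np.abs(np.correlate(ref, shuffled, mode="full")))
    return peaks

peaks = nulldist_maxcorr(np.random.randn(300), numreps=200)
print(np.percentile(peaks, [95.0, 99.0, 99.5]))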
Example 6
def refineregressor(
    fmridata,
    fmritr,
    shiftedtcs,
    weights,
    passnum,
    lagstrengths,
    lagtimes,
    lagsigma,
    lagmask,
    R2,
    theprefilter,
    optiondict,
    padtrs=60,
    bipolar=False,
    includemask=None,
    excludemask=None,
    debug=False,
    rt_floatset=np.float64,
    rt_floattype="float64",
):
    """

    Parameters
    ----------
    fmridata : 4D numpy float array
       fMRI data
    fmritr : float
        Data repetition rate, in seconds
    shiftedtcs : 4D numpy float array
        Time aligned voxel timecourses
    weights :  unknown
        unknown
    passnum : int
        Number of the pass (for labelling output)
    lagstrengths : 3D numpy float array
        Maximum correlation coefficient in every voxel
    lagtimes : 3D numpy float array
        Time delay of maximum crosscorrelation in seconds
    lagsigma : 3D numpy float array
        Gaussian width of the crosscorrelation peak, in seconds.
    lagmask : 3D numpy float array
        Mask of voxels with successful correlation fits.
    R2 : 3D numpy float array
        Square of the maximum correlation coefficient in every voxel
    theprefilter : function
        The filter function to use
    optiondict : dict
        Dictionary of all internal rapidtide configuration variables.
    padtrs : int, optional
        Number of timepoints to pad onto each end
    bipolar : bool, optional
        Use absolute correlation strengths, flipping the sign of anticorrelated
        voxels before averaging.  Default is False
    includemask : 3D array
        Mask of voxels to include in refinement.  Default is None (all voxels).
    excludemask : 3D array
        Mask of voxels to exclude from refinement.  Default is None (no voxels).
    debug : bool
        Enable additional debugging output.  Default is False
    rt_floatset : function
        Function to coerce variable types
    rt_floattype : {'float32', 'float64'}
        Data type for internal variables

    Returns
    -------
    volumetotal : int
        Number of voxels processed
    outputdata : float array
        New regressor
    maskarray : 3D array
        Mask of voxels used for refinement
    """
    inputshape = np.shape(fmridata)
    if optiondict["ampthresh"] < 0.0:
        if bipolar:
            theampthresh = tide_stats.getfracval(np.fabs(lagstrengths),
                                                 -optiondict["ampthresh"],
                                                 nozero=True)
        else:
            theampthresh = tide_stats.getfracval(lagstrengths,
                                                 -optiondict["ampthresh"],
                                                 nozero=True)
        print(
            "setting ampthresh to the",
            -100.0 * optiondict["ampthresh"],
            "th percentile (",
            theampthresh,
            ")",
        )
    else:
        theampthresh = optiondict["ampthresh"]
    if bipolar:
        ampmask = np.where(
            np.fabs(lagstrengths) >= theampthresh, np.int16(1), np.int16(0))
    else:
        ampmask = np.where(lagstrengths >= theampthresh, np.int16(1),
                           np.int16(0))
    if optiondict["lagmaskside"] == "upper":
        delaymask = np.where(
            (lagtimes - optiondict["offsettime"]) > optiondict["lagminthresh"],
            np.int16(1),
            np.int16(0),
        ) * np.where(
            (lagtimes - optiondict["offsettime"]) < optiondict["lagmaxthresh"],
            np.int16(1),
            np.int16(0),
        )
    elif optiondict["lagmaskside"] == "lower":
        delaymask = np.where(
            (lagtimes - optiondict["offsettime"]) <
            -optiondict["lagminthresh"],
            np.int16(1),
            np.int16(0),
        ) * np.where(
            (lagtimes - optiondict["offsettime"]) >
            -optiondict["lagmaxthresh"],
            np.int16(1),
            np.int16(0),
        )
    else:
        abslag = abs(lagtimes) - optiondict["offsettime"]
        delaymask = np.where(abslag > optiondict["lagminthresh"], np.int16(1),
                             np.int16(0)) * np.where(
                                 abslag < optiondict["lagmaxthresh"],
                                 np.int16(1), np.int16(0))
    sigmamask = np.where(lagsigma < optiondict["sigmathresh"], np.int16(1),
                         np.int16(0))
    locationmask = lagmask + 0
    if includemask is not None:
        locationmask = locationmask * includemask
    if excludemask is not None:
        locationmask = locationmask * (1 - excludemask)
    locationmask = locationmask.astype(np.int16)
    print("location mask created")

    # first generate the refine mask
    locationfails = np.sum(1 - locationmask)
    ampfails = np.sum(1 - ampmask * locationmask)
    lagfails = np.sum(1 - delaymask * locationmask)
    sigmafails = np.sum(1 - sigmamask * locationmask)
    refinemask = locationmask * ampmask * delaymask * sigmamask
    if tide_stats.getmasksize(refinemask) == 0:
        print("ERROR: no voxels in the refine mask:")
        print(
            "\n	",
            locationfails,
            " locationfails",
            "\n	",
            ampfails,
            " ampfails",
            "\n	",
            lagfails,
            " lagfails",
            "\n	",
            sigmafails,
            " sigmafails",
        )
        if (includemask is None) and (excludemask is None):
            print("\nRelax ampthresh, delaythresh, or sigmathresh - exiting")
        else:
            print(
                "\nChange include/exclude masks or relax ampthresh, delaythresh, or sigmathresh - exiting"
            )
        return 0, None, None, locationfails, ampfails, lagfails, sigmafails

    if optiondict["cleanrefined"]:
        shiftmask = locationmask
    else:
        shiftmask = refinemask
    volumetotal = np.sum(shiftmask)
    reportstep = 1000

    # timeshift the valid voxels
    if optiondict["nprocs"] > 1:
        # define the consumer function here so it inherits most of the arguments
        def timeshift_consumer(inQ, outQ):
            while True:
                try:
                    # get a new message
                    val = inQ.get()

                    # this is the 'TERM' signal
                    if val is None:
                        break

                    # process and send the data
                    outQ.put(
                        _procOneVoxelTimeShift(
                            val,
                            fmridata[val, :],
                            lagstrengths[val],
                            R2[val],
                            lagtimes[val],
                            padtrs,
                            fmritr,
                            theprefilter,
                            optiondict["fmrifreq"],
                            refineprenorm=optiondict["refineprenorm"],
                            lagmaxthresh=optiondict["lagmaxthresh"],
                            refineweighting=optiondict["refineweighting"],
                            detrendorder=optiondict["detrendorder"],
                            offsettime=optiondict["offsettime"],
                            filterbeforePCA=optiondict["filterbeforePCA"],
                            psdfilter=optiondict["psdfilter"],
                            rt_floatset=rt_floatset,
                            rt_floattype=rt_floattype,
                        ))

                except Exception as e:
                    print("error!", e)
                    break

        data_out = tide_multiproc.run_multiproc(
            timeshift_consumer,
            inputshape,
            shiftmask,
            nprocs=optiondict["nprocs"],
            showprogressbar=True,
            chunksize=optiondict["mp_chunksize"],
        )

        # unpack the data
        psdlist = []
        for voxel in data_out:
            shiftedtcs[voxel[0], :] = voxel[1]
            weights[voxel[0], :] = voxel[2]
            if optiondict["psdfilter"]:
                psdlist.append(voxel[3])
        del data_out

    else:
        psdlist = []
        for vox in range(0, inputshape[0]):
            if (vox % reportstep == 0 or vox
                    == inputshape[0] - 1) and optiondict["showprogressbar"]:
                tide_util.progressbar(vox + 1,
                                      inputshape[0],
                                      label="Percent complete (timeshifting)")
            if shiftmask[vox] > 0.5:
                retvals = _procOneVoxelTimeShift(
                    vox,
                    fmridata[vox, :],
                    lagstrengths[vox],
                    R2[vox],
                    lagtimes[vox],
                    padtrs,
                    fmritr,
                    theprefilter,
                    optiondict["fmrifreq"],
                    refineprenorm=optiondict["refineprenorm"],
                    lagmaxthresh=optiondict["lagmaxthresh"],
                    refineweighting=optiondict["refineweighting"],
                    detrendorder=optiondict["detrendorder"],
                    offsettime=optiondict["offsettime"],
                    filterbeforePCA=optiondict["filterbeforePCA"],
                    psdfilter=optiondict["psdfilter"],
                    rt_floatset=rt_floatset,
                    rt_floattype=rt_floattype,
                )
                shiftedtcs[retvals[0], :] = retvals[1]
                weights[retvals[0], :] = retvals[2]
                if optiondict["psdfilter"]:
                    psdlist.append(retvals[3])
        print()

    if optiondict["psdfilter"]:
        print(len(psdlist))
        print(psdlist[0])
        print(np.shape(np.asarray(psdlist, dtype=rt_floattype)))
        averagepsd = np.mean(np.asarray(psdlist, dtype=rt_floattype), axis=0)
        stdpsd = np.std(np.asarray(psdlist, dtype=rt_floattype), axis=0)
        snr = np.nan_to_num(averagepsd / stdpsd)

    # now generate the refined timecourse(s)
    validlist = np.where(refinemask > 0)[0]
    refinevoxels = shiftedtcs[validlist, :]
    if bipolar:
        for thevoxel in range(len(validlist)):
            if lagstrengths[validlist][thevoxel] < 0.0:
                refinevoxels[thevoxel, :] *= -1.0
    refineweights = weights[validlist]
    weightsum = np.sum(refineweights, axis=0) / volumetotal
    averagedata = np.sum(refinevoxels, axis=0) / volumetotal
    if optiondict["cleanrefined"]:
        invalidlist = np.where((1 - ampmask) > 0)[0]
        discardvoxels = shiftedtcs[invalidlist]
        discardweights = weights[invalidlist]
        discardweightsum = np.sum(discardweights, axis=0) / volumetotal
        averagediscard = np.sum(discardvoxels, axis=0) / volumetotal
    if optiondict["dodispersioncalc"]:
        print("splitting regressors by time lag for phase delay estimation")
        laglist = np.arange(
            optiondict["dispersioncalc_lower"],
            optiondict["dispersioncalc_upper"],
            optiondict["dispersioncalc_step"],
        )
        dispersioncalcout = np.zeros((np.shape(laglist)[0], inputshape[1]),
                                     dtype=rt_floattype)
        fftlen = int(inputshape[1] // 2)
        fftlen -= fftlen % 2
        dispersioncalcspecmag = np.zeros((np.shape(laglist)[0], fftlen),
                                         dtype=rt_floattype)
        dispersioncalcspecphase = np.zeros((np.shape(laglist)[0], fftlen),
                                           dtype=rt_floattype)
        for lagnum in range(0, np.shape(laglist)[0]):
            lower = laglist[lagnum] - optiondict["dispersioncalc_step"] / 2.0
            upper = laglist[lagnum] + optiondict["dispersioncalc_step"] / 2.0
            inlagrange = np.where(
                locationmask * ampmask *
                np.where(lower < lagtimes, np.int16(1), np.int16(0)) *
                np.where(lagtimes < upper, np.int16(1), np.int16(0)))[0]
            print(
                "    summing",
                np.shape(inlagrange)[0],
                "regressors with lags from",
                lower,
                "to",
                upper,
            )
            if np.shape(inlagrange)[0] > 0:
                dispersioncalcout[lagnum, :] = tide_math.corrnormalize(
                    np.mean(shiftedtcs[inlagrange], axis=0),
                    detrendorder=optiondict["detrendorder"],
                    windowfunc=optiondict["windowfunc"],
                )
                (
                    freqs,
                    dispersioncalcspecmag[lagnum, :],
                    dispersioncalcspecphase[lagnum, :],
                ) = tide_math.polarfft(dispersioncalcout[lagnum, :],
                                       1.0 / fmritr)
            inlagrange = None
        tide_io.writenpvecs(
            dispersioncalcout,
            optiondict["outputname"] + "_dispersioncalcvecs_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            dispersioncalcspecmag,
            optiondict["outputname"] + "_dispersioncalcspecmag_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            dispersioncalcspecphase,
            optiondict["outputname"] + "_dispersioncalcspecphase_pass" +
            str(passnum) + ".txt",
        )
        tide_io.writenpvecs(
            freqs,
            optiondict["outputname"] + "_dispersioncalcfreqs_pass" +
            str(passnum) + ".txt",
        )

    if optiondict["pcacomponents"] < 0.0:
        pcacomponents = "mle"
    elif optiondict["pcacomponents"] >= 1.0:
        pcacomponents = int(np.round(optiondict["pcacomponents"]))
    elif optiondict["pcacomponents"] == 0.0:
        print("0.0 is not an allowed value for pcacomponents")
        sys.exit()
    else:
        pcacomponents = optiondict["pcacomponents"]
    icacomponents = 1

    if optiondict["refinetype"] == "ica":
        print("performing ica refinement")
        thefit = FastICA(n_components=icacomponents).fit(
            refinevoxels)  # Reconstruct signals
        print("Using first of ", len(thefit.components_), " components")
        icadata = thefit.components_[0]
        filteredavg = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], averagedata),
            detrendorder=optiondict["detrendorder"],
        )
        filteredica = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], icadata),
            detrendorder=optiondict["detrendorder"],
        )
        thepxcorr = pearsonr(filteredavg, filteredica)[0]
        print("ica/avg correlation = ", thepxcorr)
        if thepxcorr > 0.0:
            outputdata = 1.0 * icadata
        else:
            outputdata = -1.0 * icadata
    elif optiondict["refinetype"] == "pca":
        # use the method of "A novel perspective to calibrate temporal delays in cerebrovascular reactivity
        # using hypercapnic and hyperoxic respiratory challenges". NeuroImage 187, 154-165 (2019).
        print("performing pca refinement with pcacomponents set to",
              pcacomponents)
        try:
            thefit = PCA(n_components=pcacomponents).fit(refinevoxels)
        except ValueError:
            if pcacomponents == "mle":
                print(
                    "mle estimation failed - falling back to pcacomponents=0.8"
                )
                thefit = PCA(n_components=0.8).fit(refinevoxels)
            else:
                print("unhandled math exception in PCA refinement - exiting")
                sys.exit()
        print(
            "Using ",
            len(thefit.components_),
            " component(s), accounting for ",
            "{:.2f}% of the variance".format(100.0 * np.cumsum(
                thefit.explained_variance_ratio_)[len(thefit.components_) -
                                                  1]),
        )
        reduceddata = thefit.inverse_transform(thefit.transform(refinevoxels))
        if debug:
            print("complex processing: reduceddata.shape =", reduceddata.shape)
        pcadata = np.mean(reduceddata, axis=0)
        filteredavg = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], averagedata),
            detrendorder=optiondict["detrendorder"],
        )
        filteredpca = tide_math.corrnormalize(
            theprefilter.apply(optiondict["fmrifreq"], pcadata),
            detrendorder=optiondict["detrendorder"],
        )
        thepxcorr = pearsonr(filteredavg, filteredpca)[0]
        print("pca/avg correlation = ", thepxcorr)
        if thepxcorr > 0.0:
            outputdata = 1.0 * pcadata
        else:
            outputdata = -1.0 * pcadata
    elif optiondict["refinetype"] == "weighted_average":
        print("performing weighted averaging refinement")
        outputdata = np.nan_to_num(averagedata / weightsum)
    else:
        print("performing unweighted averaging refinement")
        outputdata = averagedata

    if optiondict["cleanrefined"]:
        thefit, R = tide_fit.mlregress(averagediscard, averagedata)
        fitcoff = rt_floatset(thefit[0, 1])
        datatoremove = rt_floatset(fitcoff * averagediscard)
        outputdata -= datatoremove
    print()
    print(
        "Timeshift applied to " + str(int(volumetotal)) + " voxels, " +
        str(len(validlist)) + " used for refinement:",
        "\n	",
        locationfails,
        " locationfails",
        "\n	",
        ampfails,
        " ampfails",
        "\n	",
        lagfails,
        " lagfails",
        "\n	",
        sigmafails,
        " sigmafails",
    )

    if optiondict["psdfilter"]:
        outputdata = tide_filt.transferfuncfilt(outputdata, snr)

    # garbage collect
    collected = gc.collect()
    print("Garbage collector: collected %d objects." % collected)

    return volumetotal, outputdata, refinemask, locationfails, ampfails, lagfails, sigmafails
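
Stripped of the masking, weighting, dispersion, and multiprocessing machinery above, the refinement core is: realign each valid voxel's timecourse by its fitted lag, then average into a new regressor. A minimal sketch, assuming simple linear interpolation and unit weights (refine_regressor is an illustrative name):

import numpy as np

def refine_regressor(fmridata, tr, lagtimes, lagmask):
    """fmridata: (numvoxels, timepoints); lagtimes in seconds."""
    numvoxels, timepoints = fmridata.shape
    t = np.arange(timepoints) * tr
    shifted = np.zeros_like(fmridata)
    for vox in range(numvoxels):
        if lagmask[vox] > 0:
            # resample the voxel at t + lag to undo its delay
            # (np.interp clamps at the ends rather than padding)
            shifted[vox] = np.interp(t + lagtimes[vox], t, fmridata[vox])
    nvalid = max(int(np.sum(lagmask > 0)), 1)
    return np.sum(shifted[lagmask > 0], axis=0) / nvalid

fmridata = np.random.randn(50, 200)
lagtimes = np.random.uniform(-5.0, 5.0, size=50)
lagmask = np.ones(50)
newregressor = refine_regressor(fmridata, 0.72, lagtimes, lagmask)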
Example 7
def polyfitim(
    datafile,
    datamask,
    templatefile,
    templatemask,
    outputroot,
    regionatlas=None,
    order=1,
):

    # read in data
    print("reading in data arrays")
    (
        datafile_img,
        datafile_data,
        datafile_hdr,
        datafiledims,
        datafilesizes,
    ) = tide_io.readfromnifti(datafile)
    (
        datamask_img,
        datamask_data,
        datamask_hdr,
        datamaskdims,
        datamasksizes,
    ) = tide_io.readfromnifti(datamask)
    (
        templatefile_img,
        templatefile_data,
        templatefile_hdr,
        templatefiledims,
        templatefilesizes,
    ) = tide_io.readfromnifti(templatefile)
    (
        templatemask_img,
        templatemask_data,
        templatemask_hdr,
        templatemaskdims,
        templatemasksizes,
    ) = tide_io.readfromnifti(templatemask)

    if regionatlas is not None:
        (
            regionatlas_img,
            regionatlas_data,
            regionatlas_hdr,
            regionatlasdims,
            regionatlassizes,
        ) = tide_io.readfromnifti(regionatlas)

    xsize = datafiledims[1]
    ysize = datafiledims[2]
    numslices = datafiledims[3]
    timepoints = datafiledims[4]

    # check dimensions
    print("checking dimensions")
    if not tide_io.checkspacedimmatch(datafiledims, datamaskdims):
        print("input mask spatial dimensions do not match image")
        exit()
    if datamaskdims[4] == 1:
        print("using 3d data mask")
        datamask3d = True
    else:
        datamask3d = False
        if not tide_io.checktimematch(datafiledims, datamaskdims):
            print("input mask time dimension does not match image")
            exit()
    if not tide_io.checkspacedimmatch(datafiledims, templatefiledims):
        print(templatefiledims,
              "template file spatial dimensions do not match image")
        exit()
    if not templatefiledims[4] == 1:
        print("template file time dimension is not equal to 1")
        exit()
    if not tide_io.checkspacedimmatch(datafiledims, templatemaskdims):
        print("template mask spatial dimensions do not match image")
        exit()
    if not templatemaskdims[4] == 1:
        print("template mask time dimension is not equal to 1")
        exit()
    if regionatlas is not None:
        if not tide_io.checkspacedimmatch(datafiledims, regionatlasdims):
            print("template mask spatial dimensions do not match image")
            exit()
        if not regionatlasdims[4] == 1:
            print("regionatlas time dimension is not equal to 1")
            exit()

    # allocating arrays
    print("allocating arrays")
    numspatiallocs = int(xsize) * int(ysize) * int(numslices)
    rs_datafile = datafile_data.reshape((numspatiallocs, timepoints))
    if datamask3d:
        rs_datamask = datamask_data.reshape(numspatiallocs)
    else:
        rs_datamask = datamask_data.reshape((numspatiallocs, timepoints))
    rs_datamask_bin = np.where(rs_datamask > 0.9, 1.0, 0.0)
    rs_templatefile = templatefile_data.reshape(numspatiallocs)
    rs_templatemask = templatemask_data.reshape(numspatiallocs)
    rs_templatemask_bin = np.where(rs_templatemask > 0.1, 1.0, 0.0)
    if regionatlas is not None:
        rs_regionatlas = regionatlas_data.reshape(numspatiallocs)
        numregions = int(np.max(rs_regionatlas))

    fitdata = np.zeros((numspatiallocs, timepoints), dtype="float")
    # residuals = np.zeros((numspatiallocs, timepoints), dtype='float')
    # newtemplate = np.zeros((numspatiallocs), dtype='float')
    # newmask = np.zeros((numspatiallocs), dtype='float')
    if regionatlas is not None:
        lincoffs = np.zeros((numregions, timepoints), dtype="float")
        sqrcoffs = np.zeros((numregions, timepoints), dtype="float")
        offsets = np.zeros((numregions, timepoints), dtype="float")
        rvals = np.zeros((numregions, timepoints), dtype="float")
    else:
        lincoffs = np.zeros(timepoints, dtype="float")
        sqrcoffs = np.zeros(timepoints, dtype="float")
        offsets = np.zeros(timepoints, dtype="float")
        rvals = np.zeros(timepoints, dtype="float")

    if regionatlas is not None:
        print("making region masks")
        regionvoxels = np.zeros((numspatiallocs, numregions), dtype="float")
        for region in range(0, numregions):
            thisregion = np.where((rs_regionatlas *
                                   rs_templatemask_bin) == (region + 1))
            regionvoxels[thisregion, region] = 1.0

    # mask everything
    print("masking template")
    maskedtemplate = rs_templatefile * rs_templatemask_bin

    # cycle over all images
    print("now cycling over all images")
    for thetime in range(0, timepoints):
        print("fitting timepoint", thetime)

        # get the appropriate mask
        if datamask3d:
            thisdatamask = rs_datamask_bin
        else:
            thisdatamask = rs_datamask_bin[:, thetime]
        if regionatlas is not None:
            for region in range(0, numregions):
                voxelstofit = np.where(
                    regionvoxels[:, region] * thisdatamask > 0.5)
                voxelstoreconstruct = np.where(regionvoxels[:, region] > 0.5)
                if order == 2:
                    thefit, R = tide_fit.mlregress(
                        [
                            rs_templatefile[voxelstofit],
                            np.square(rs_templatefile[voxelstofit]),
                        ],
                        rs_datafile[voxelstofit, thetime][0],
                    )
                else:
                    thefit, R = tide_fit.mlregress(
                        rs_templatefile[voxelstofit],
                        rs_datafile[voxelstofit, thetime][0],
                    )
                lincoffs[region, thetime] = thefit[0, 1]
                offsets[region, thetime] = thefit[0, 0]
                rvals[region, thetime] = R
                if order == 2:
                    sqrcoffs[region, thetime] = thefit[0, 2]
                    fitdata[voxelstoreconstruct, thetime] += (
                        sqrcoffs[region, thetime] *
                        np.square(rs_templatefile[voxelstoreconstruct]) +
                        lincoffs[region, thetime] *
                        rs_templatefile[voxelstoreconstruct] +
                        offsets[region, thetime])
                else:
                    fitdata[voxelstoreconstruct,
                            thetime] += (lincoffs[region, thetime] *
                                         rs_templatefile[voxelstoreconstruct] +
                                         offsets[region, thetime])
                # newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[region, thetime]) * rs_datamask
                # newmask += rs_datamask * rs_templatemask_bin
        else:
            voxelstofit = np.where(thisdatamask > 0.5)
            voxelstoreconstruct = np.where(rs_templatemask > 0.5)
            thefit, R = tide_fit.mlregress(
                rs_templatefile[voxelstofit], rs_datafile[voxelstofit,
                                                          thetime][0])
            lincoffs[thetime] = thefit[0, 1]
            offsets[thetime] = thefit[0, 0]
            rvals[thetime] = R
            fitdata[voxelstoreconstruct, thetime] = (
                lincoffs[thetime] * rs_templatefile[voxelstoreconstruct] +
                offsets[thetime])
            # if datamask3d:
            #    newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[thetime]) * rs_datamask
            # else:
            #    newtemplate += nan_to_num(maskeddata[:, thetime] / lincoffs[thetime]) * rs_datamask[:, thetime]
            # newmask += rs_datamask[:, thetime] * rs_templatemask_bin
    residuals = rs_datafile - fitdata

    # write out the data files
    print("writing time series")
    if order == 2:
        tide_io.writenpvecs(sqrcoffs, outputroot + "_sqrcoffs.txt")
    tide_io.writenpvecs(lincoffs, outputroot + "_lincoffs.txt")
    tide_io.writenpvecs(offsets, outputroot + "_offsets.txt")
    tide_io.writenpvecs(rvals, outputroot + "_rvals.txt")
    if regionatlas is not None:
        for region in range(0, numregions):
            print(
                "region",
                region + 1,
                "slope mean, std:",
                np.mean(lincoffs[region, :]),
                np.std(lincoffs[region, :]),
            )
            print(
                "region",
                region + 1,
                "offset mean, std:",
                np.mean(offsets[region, :]),
                np.std(offsets[region, :]),
            )
    else:
        print("slope mean, std:", np.mean(lincoffs), np.std(lincoffs))
        print("offset mean, std:", np.mean(offsets), np.std(offsets))

    print("writing nifti series")
    tide_io.savetonifti(
        fitdata.reshape((xsize, ysize, numslices, timepoints)),
        datafile_hdr,
        outputroot + "_fit",
    )
    tide_io.savetonifti(
        residuals.reshape((xsize, ysize, numslices, timepoints)),
        datafile_hdr,
        outputroot + "_residuals",
    )
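
Each timepoint's fit above is an ordinary least-squares regression of a volume on the template (plus its square for order=2). A compact sketch of the order-1 case, using numpy.linalg.lstsq as an illustrative stand-in for tide_fit.mlregress:

import numpy as np

def fit_timepoint(template, volume):
    """Return (offset, slope, r) for volume ~ offset + slope * template."""
    A = np.column_stack([np.ones_like(template), template])
    coeffs, _, _, _ = np.linalg.lstsq(A, volume, rcond=None)
    fitted = A @ coeffs
    r = np.corrcoef(volume, fitted)[0, 1]
    return coeffs[0], coeffs[1], r

template = np.random.randn(1000)
volume = 2.0 * template + 0.5 + 0.1 * np.random.randn(1000)
offset, slope, r = fit_timepoint(template, volume)
print(offset, slope, r)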
Example 8
def roisummarize(args):
    # grab the command line arguments then pass them off.
    try:
        args = _get_parser().parse_args()
    except SystemExit:
        _get_parser().print_help()
        raise

    # set the sample rate
    if args.samplerate == "auto":
        args.samplerate = 1.0

    args, thefilter = pf.postprocessfilteropts(args, debug=args.debug)

    print("loading fmri data")
    input_img, input_data, input_hdr, thedims, thesizes = tide_io.readfromnifti(
        args.inputfilename)
    print("loading template data")
    template_img, template_data, template_hdr, templatedims, templatesizes = tide_io.readfromnifti(
        args.templatefile)

    print("checking dimensions")
    if not tide_io.checkspacematch(input_hdr, template_hdr):
        print(
            "template file does not match spatial coverage of input fmri file")
        sys.exit()

    print("reshaping")
    xsize = thedims[1]
    ysize = thedims[2]
    numslices = thedims[3]
    numtimepoints = thedims[4]
    numvoxels = int(xsize) * int(ysize) * int(numslices)
    templatevoxels = np.reshape(template_data, numvoxels).astype(int)

    if numtimepoints > 1:
        inputvoxels = np.reshape(input_data,
                                 (numvoxels, numtimepoints))[:, args.numskip:]
        print("filtering")
        for thevoxel in range(numvoxels):
            if templatevoxels[thevoxel] > 0:
                inputvoxels[thevoxel, :] = thefilter.apply(
                    args.samplerate, inputvoxels[thevoxel, :])

        print("summarizing")
        timecourses = summarize4Dbylabel(inputvoxels,
                                         templatevoxels,
                                         normmethod=args.normmethod,
                                         debug=args.debug)

        print("writing data")
        tide_io.writenpvecs(timecourses, args.outputfile + "_timecourses")
    else:
        inputvoxels = np.reshape(input_data, (numvoxels))
        numregions = np.max(templatevoxels)
        template_hdr["dim"][4] = numregions
        outputvoxels, regionstats = summarize3Dbylabel(inputvoxels,
                                                       templatevoxels,
                                                       debug=args.debug)
        tide_io.savetonifti(
            outputvoxels.reshape((xsize, ysize, numslices)),
            template_hdr,
            args.outputfile + "_meanvals",
        )
        tide_io.writenpvecs(np.array(regionstats),
                            args.outputfile + "_regionstats.txt")
Example 9
def showxcorrx_workflow(infilename1, infilename2, Fs,
                        thelabel='', starttime=0., duration=1000000.,
                        searchrange=15.,
                        display=True, trimdata=False,
                        summarymode=False, labelline=False,
                        flipregressor=False, windowfunc='hamming',
                        calccepstraldelay=False, corroutputfile=False,
                        controlvariablefile=None, numreps=0,
                        arbvec=None, filtertype='arb', corrweighting='none',
                        detrendorder=1, prewindow=True, verbose=False):
    r"""Calculate and display crosscorrelation between two timeseries.

    Parameters
    ----------
    infilename1 : str
        The name of a text file containing a timeseries, one timepoint per line.
    infilename2 : str
        The name of a text file containing a timeseries, one timepoint per line.
    Fs : float
        The sample rate of the time series, in Hz.
    thelabel : str, optional
        The label for the output graph.  Default is blank.
    starttime : float, optional
        Time offset into the timeseries, in seconds, to start using the time data.  Default is 0
    duration : float, optional
        Length of time from each time series, in seconds, to use for the cross-correlation.  Default is the entire time series.
    searchrange : float, optional
        Only search for cross-correlation peaks between -searchrange and +searchrange seconds (default is 15).
    display : bool, optional
        Plot cross-correlation function in a matplotlib window.  Default is True.
    trimdata : bool, optional
        Trim time series to the length of the shorter series.  Default is False.
    summarymode : bool, optional
        Output a table of interesting results for later processing.  Default is False.
    labelline : bool, optional
        Print an explanatory header line over the summary information.  Default is False.
    flipregressor : bool, optional
        Invert timeseries 2 prior to cross-correlation.
    windowfunc : {'hamming', 'hann', 'blackmanharris'}
        Window function to apply prior to cross-correlation.  Default is 'hamming'.
    calccepstraldelay : bool, optional
        Use cepstral estimation of delay.  Default is False.
    corroutputfile : str, optional
        Name of a file to save the correlation function to.  Default is False (no file is written).
    controlvariablefile : str, optional
        Name of a text file of control timeseries to regress out of both inputs before correlation (partial correlation).  Default is None.
    numreps : int, optional
        Number of null correlations to perform to estimate significance.  Default is 0 (no significance estimation).
    arbvec : [float,float,float,float], optional
        Frequency limits of the arb_pass filter.
    filtertype : {'none', 'card', 'lfo', 'vlf', 'resp', 'arb'}, optional
        Type of filter to apply to the data prior to correlation.  Default is 'arb'.
    corrweighting : {'none', 'Liang', 'Eckart', 'PHAT'}, optional
         Weighting function to apply to the crosscorrelation in the Fourier domain.  Default is 'none'
    detrendorder : int, optional
       Order of polynomial used to detrend crosscorrelation inputs.  Default is 1 (0 disables)
    prewindow : bool, optional
        Apply window function prior to cross-correlation.  Default is True.
    verbose : bool, optional
        Print internal status information.  Default is False.

    Notes
    -----
    This workflow writes out several files:

    If corroutputfile is defined:

    ======================    =================================================
    Filename                  Content
    ======================    =================================================
    corrlist.txt              A file
    corrlist_pear.txt         A file
    [corroutputfile]          Correlation function
    ======================    =================================================

    If debug is True:

    ======================    =================================================
    Filename                  Content
    ======================    =================================================
    filtereddata1.txt         Something
    filtereddata2.txt         Something
    ======================    =================================================
    """
    # Constants that could be arguments
    dofftcorr = True
    writecorrlists = False
    debug = False
    showpearson = True

    # These are unnecessary and should be simplified
    dopartial = bool(controlvariablefile)
    uselabel = bool(thelabel)
    dumpfiltered = bool(debug)

    if labelline:
        # TS: should prob reflect this in the parser, but it's not a big deal
        summarymode = True

    if numreps == 0:
        estimate_significance = False
    else:
        estimate_significance = True

    savecorrelation = bool(corroutputfile)

    theprefilter = tide_filt.noncausalfilter()

    if arbvec is not None and filtertype != 'arb':
        raise ValueError('Argument arbvec must be None if filtertype is '
                         'not arb')

    if arbvec is not None:
        if len(arbvec) == 2:
            arb_lower = float(arbvec[0])
            arb_upper = float(arbvec[1])
            arb_lowerstop = 0.9 * float(arbvec[0])
            arb_upperstop = 1.1 * float(arbvec[1])
        elif len(arbvec) == 4:
            arb_lower = float(arbvec[0])
            arb_upper = float(arbvec[1])
            arb_lowerstop = float(arbvec[2])
            arb_upperstop = float(arbvec[3])
        else:
            raise ValueError('arbvec must have either 2 or 4 elements')
        theprefilter.settype('arb')
        theprefilter.setarb(arb_lowerstop, arb_lower, arb_upper, arb_upperstop)
    else:
        theprefilter.settype(filtertype)

    inputdata1 = tide_io.readvec(infilename1)
    inputdata2 = tide_io.readvec(infilename2)
    numpoints = len(inputdata1)

    startpoint1 = max([int(starttime * Fs), 0])
    if debug:
        print('startpoint set to ', startpoint1)
    endpoint1 = min([startpoint1 + int(duration * Fs), int(len(inputdata1))])
    if debug:
        print('endpoint set to ', endpoint1)
    endpoint2 = min([int(duration * Fs), int(len(inputdata1)),
                     int(len(inputdata2))])
    trimdata1 = inputdata1[startpoint1:endpoint1]
    trimdata2 = inputdata2[0:endpoint2]

    if trimdata:
        minlen = np.min([len(trimdata1), len(trimdata2)])
        trimdata1 = trimdata1[0:minlen]
        trimdata2 = trimdata2[0:minlen]

    # band limit the regressor if that is needed
    if theprefilter.gettype() != 'none':
        if verbose:
            print("filtering to ", theprefilter.gettype(), " band")
    print(windowfunc)
    filtereddata1 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata1),
                                            prewindow=prewindow,
                                            detrendorder=detrendorder,
                                            windowfunc=windowfunc)
    filtereddata2 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata2),
                                            prewindow=prewindow,
                                            detrendorder=detrendorder,
                                            windowfunc=windowfunc)
    if flipregressor:
        filtereddata2 *= -1.0

    if dumpfiltered:
        tide_io.writenpvecs(filtereddata1, 'filtereddata1.txt')
        tide_io.writenpvecs(filtereddata2, 'filtereddata2.txt')

    if dopartial:
        controlvars = tide_io.readvecs(controlvariablefile)
        numregressors = len(controlvars)  # Added by TS. Not sure if works.
        regressorvec = []
        for j in range(0, numregressors):
            regressorvec.append(tide_math.corrnormalize(
                theprefilter.apply(Fs, controlvars[j, :]),
                prewindow=prewindow,
                detrendorder=detrendorder,
                windowfunc=windowfunc))

        if (np.max(filtereddata1) - np.min(filtereddata1)) > 0.0:
            thefit, filtereddata1 = tide_fit.mlregress(regressorvec,
                                                       filtereddata1)

        if (np.max(filtereddata2) - np.min(filtereddata2)) > 0.0:
            thefit, filtereddata2 = tide_fit.mlregress(regressorvec,
                                                       filtereddata2)

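    # cross-correlate the two preprocessed timecourses (FFT-based when dofftcorr is True)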
    thexcorr = tide_corr.fastcorrelate(filtereddata1, filtereddata2,
                                       usefft=dofftcorr,
                                       weighting=corrweighting,
                                       displayplots=debug)

    if calccepstraldelay:
        cepdelay = tide_corr.cepstraldelay(filtereddata1, filtereddata2,
                                           1.0 / Fs, displayplots=display)
        cepcoff = tide_corr.delayedcorr(filtereddata1, filtereddata2, cepdelay,
                                        1.0 / Fs)
        print('cepstral delay time is {0}, correlation is {1}'.format(cepdelay,
                                                                      cepcoff))
    thepxcorr = pearsonr(filtereddata1, filtereddata2)

    # calculate the coherence and the cross spectral density
    normdata1 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata1),
                                        prewindow=prewindow,
                                        detrendorder=detrendorder,
                                        windowfunc=windowfunc)
    normdata2 = tide_math.corrnormalize(theprefilter.apply(Fs, trimdata2),
                                        prewindow=prewindow,
                                        detrendorder=detrendorder,
                                        windowfunc=windowfunc)
    f, Cxy = sp.signal.coherence(normdata1, normdata2, Fs)
    f, Pxy = sp.signal.csd(normdata1, normdata2, Fs)

    xcorrlen = len(thexcorr)
    sampletime = 1.0 / Fs
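    # build the lag axis: lag zero falls at the center of the correlation function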
    xcorr_x = r_[0:xcorrlen] * sampletime - (xcorrlen * sampletime) / 2.0\
        + sampletime / 2.0
    halfwindow = int(searchrange * Fs)
    corrzero = xcorrlen // 2
    searchstart = corrzero - halfwindow
    searchend = corrzero + halfwindow
    xcorr_x_trim = xcorr_x[searchstart:searchend + 1]
    thexcorr_trim = thexcorr[searchstart:searchend + 1]
    if debug:
        print('searching for peak correlation over range ', searchstart,
              searchend)

    maxdelay = xcorr_x_trim[argmax(thexcorr_trim)]
    if debug:
        print('maxdelay before refinement', maxdelay)

    dofindmaxlag = True
    if dofindmaxlag:
        if debug:
            print('executing findmaxlag')
        (maxindex, maxdelay, maxval, maxsigma, maskval, failreason, peakstart,
         peakend) = tide_fit.findmaxlag_gauss(
             xcorr_x_trim, thexcorr_trim, -searchrange, searchrange, 1000.0,
             refine=True,
             useguess=False,
             fastgauss=False,
             displayplots=False)
        R = maxval
        if debug:
            print(maxindex, maxdelay, maxval, maxsigma, maskval, failreason)
            print('maxdelay after refinement', maxdelay)
            if failreason > 0:
                print('failreason =', failreason)
    else:
        R = thexcorr_trim[argmax(thexcorr_trim)]

    # set the significance threshold
    if estimate_significance:
        # generate a list of correlations from shuffled data
        (corrlist,
         corrlist_pear) = _get_null_distribution(trimdata1, xcorr_x,
                                                 theprefilter, prewindow,
                                                 detrendorder, searchstart,
                                                 searchend, Fs, dofftcorr,
                                                 corrweighting=corrweighting,
                                                 numreps=numreps,
                                                 windowfunc=windowfunc)

        # calculate percentiles for the crosscorrelation from the distribution
        histlen = 100
        thepercentiles = [0.95, 0.99, 0.995]

        (pcts, pcts_fit,
         histfit) = tide_stats.sigFromDistributionData(corrlist, histlen,
                                                       thepercentiles)
        if debug:
            tide_stats.printthresholds(pcts, thepercentiles,
                                       ('Crosscorrelation significance '
                                        'thresholds from data:'))
            tide_stats.printthresholds(pcts_fit, thepercentiles,
                                       ('Crosscorrelation significance '
                                        'thresholds from fit:'))

        # calculate significance for the pearson correlation
        (pearpcts, pearpcts_fit,
         histfit) = tide_stats.sigFromDistributionData(corrlist_pear, histlen,
                                                       thepercentiles)
        if debug:
            tide_stats.printthresholds(pearpcts, thepercentiles,
                                       ('Pearson correlation significance '
                                        'thresholds from data:'))
            tide_stats.printthresholds(pearpcts_fit, thepercentiles,
                                       ('Pearson correlation significance '
                                        'thresholds from fit:'))

        if writecorrlists:
            tide_io.writenpvecs(corrlist, 'corrlist.txt')
            tide_io.writenpvecs(corrlist_pear, 'corrlist_pear.txt')


    # report the pearson correlation
    if showpearson and verbose:
        print('Pearson_R:\t', thepxcorr[0])
        if estimate_significance:
            for idx, percentile in enumerate(thepercentiles):
                print('    pear_p(', "{:.3f}".format(1.0 - percentile), '):\t',
                      pearpcts[idx])
        print("")

    if debug:
        print(thepxcorr)

    if verbose:
        if uselabel:
            print(thelabel, ":\t", maxdelay)
        else:
            print("Crosscorrelation_Rmax:\t", R)
            print("Crosscorrelation_maxdelay:\t", maxdelay)
            if estimate_significance:
                for idx, percentile in enumerate(thepercentiles):
                    print('    xc_p(', "{:.3f}".format(1.0 - percentile),
                          '):\t', pcts[idx])
            print(infilename1, "[0 seconds] == ", infilename2, "[",
                  -1 * maxdelay, " seconds]")

    if summarymode:
        if estimate_significance:
            if uselabel:
                if labelline:
                    print('thelabel', 'pearson_R', 'pearson_R(p=0.05)',
                          'xcorr_R', 'xcorr_R(p=0.05)', 'xcorr_maxdelay')
                print(thelabel, thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0],
                      -1 * maxdelay)
            else:
                if labelline:
                    print('pearson_R', 'pearson_R(p=0.05)', 'xcorr_R',
                          'xcorr_R(p=0.05)', 'xcorr_maxdelay')
                print(thepxcorr[0], pearpcts_fit[0], R, pcts_fit[0],
                      -1 * maxdelay)
        else:
            if uselabel:
                if labelline:
                    print('thelabel', 'pearson_r', 'pearson_p', 'xcorr_R',
                          'xcorr_maxdelay')
                print(thelabel, thepxcorr[0], thepxcorr[1], R, -1 * maxdelay)
            else:
                if labelline:
                    print('pearson_r\tpearson_p\txcorr_R\t'
                          'xcorr_maxdelay')
                print(thepxcorr[0], '\t', thepxcorr[1], '\t', R, '\t',
                      -1 * maxdelay)

    if savecorrelation:
        tide_io.writenpvecs(np.stack((xcorr_x, thexcorr), axis=0),
                            corroutputfile)

    if display:
        fig, ax = plt.subplots()
        # ax.set_title('GCC')
        ax.plot(xcorr_x, thexcorr, 'k')
        if debug:
            fig, ax = plt.subplots()
            ax.plot(f, Cxy)
            fig, ax = plt.subplots()
            ax.plot(f, np.sqrt(np.abs(Pxy)) / np.max(np.sqrt(np.abs(Pxy))))
            ax.plot(f, np.angle(Pxy) / (2.0 * pi * f))
        fig.show()
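
For reference, the magnitude-squared coherence returned by sp.signal.coherence above is, by definition, the cross spectral density normalized by the two auto spectral densities. A minimal self-contained sketch of that identity (the test signals are invented for illustration, and scipy.signal defaults are assumed throughout):

import numpy as np
import scipy.signal as sps

fs = 10.0
t = np.arange(0.0, 100.0, 1.0 / fs)
rng = np.random.default_rng(0)
x = np.sin(2.0 * np.pi * 0.1 * t) + 0.5 * rng.standard_normal(len(t))
y = np.roll(x, 5) + 0.5 * rng.standard_normal(len(t))

f, Cxy = sps.coherence(x, y, fs=fs)  # what the function above computes
_, Pxy = sps.csd(x, y, fs=fs)        # cross spectral density
_, Pxx = sps.welch(x, fs=fs)         # auto spectral densities
_, Pyy = sps.welch(y, fs=fs)

# Cxy(f) = |Pxy(f)|**2 / (Pxx(f) * Pyy(f)) when all four calls share defaults
assert np.allclose(Cxy, np.abs(Pxy) ** 2 / (Pxx * Pyy))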
Esempio n. 10
0
def showtc(args):
    # set the sample rate
    if args.samplerate == "auto":
        samplerate = 1.0
        args.samplerate = samplerate
    else:
        samplerate = args.samplerate

    # set the appropriate display mode
    if args.displaymode == "time":
        dospectrum = False
        specmode = "power"
    elif args.displaymode == "power":
        dospectrum = True
        specmode = "power"
    elif args.displaymode == "phase":
        dospectrum = True
        specmode = "phase"
    else:
        print("illegal display mode")
        sys.exit()

    # determine how to composite multiple plots
    if args.plotformat == "overlaid":
        separate = False
        linky = True
    elif args.plotformat == "separate":
        separate = True
        linky = False
    elif args.plotformat == "separatelinked":
        separate = True
        linky = True
    else:
        print("illegal formatting mode")
        sys.exit()

    # set various cosmetic aspects of the plots
    if args.colors is not None:
        colornames = args.colors.split(",")
    else:
        colornames = []

    if args.legends is not None:
        legends = args.legends.split(",")
        legendset = True
    else:
        legends = []
        legendset = False
    dolegend = args.dolegend

    if args.linewidths is not None:
        thelinewidth = []
        for thestring in args.linewidths.split(","):
            thelinewidth.append(float(thestring))
    else:
        thelinewidth = [1.0]
    numlinewidths = len(thelinewidth)

    if 0 <= args.legendloc <= 10:
        legendloc = args.legendloc
    else:
        print("illegal legend location:", args.legendloc)
        sys.exit()

    savespec = False
    detrendorder = 1
    demean = False
    useHamming = True

    # check range
    if args.theendtime is None:
        args.theendtime = 100000000.0
    if args.thestarttime is not None:
        if args.thestarttime >= args.theendtime:
            print("endtime must be greater then starttime;")
            sys.exit()

    # handle required args first
    xvecs = []
    yvecs = []
    linelabels = []
    samplerates = []
    numvecs = 0

    minlen = 100000000
    shortcolnames = True
    # read in all the data
    for i in range(0, len(args.textfilenames)):
        thisfilename, thiscolspec = tide_io.parsefilespec(args.textfilenames[i])

        # check file type
        (
            thissamplerate,
            thisstartoffset,
            colnames,
            invecs,
            dummy,
            dummy,
        ) = tide_io.readvectorsfromtextfile(args.textfilenames[i], debug=args.debug)

        if args.debug:
            print("On return from readvectorsfromtextfile:")
            print(f"\targs.samplerate: {args.samplerate}")
            print(f"\tthissamplerate: {thissamplerate}")
            print(f"\targs.thestarttime: {args.thestarttime}")
            print(f"\tthisstartoffset: {thisstartoffset}")

        if args.debug:
            print("input data dimensions:", invecs.shape)

        if thissamplerate is None:
            thissamplerate = samplerate

        if thisstartoffset is None:
            if args.thestarttime is None:
                if args.debug:
                    print("args.thestarttime is None")
                args.thestarttime = 0.0
            else:
                if args.debug:
                    print(f"args.thestarttime is {args.thestarttime}")
            thisstartoffset = args.thestarttime
        else:
            if args.thestarttime is None:
                if args.debug:
                    print("args.thestarttime is None")
                args.thestarttime = thisstartoffset
            else:
                if args.debug:
                    print(f"args.thestarttime is {args.thestarttime}")
                thisstartoffset = args.thestarttime

        if args.debug:
            print("After preprocessing time variables:")
            print(f"\targs.samplerate: {args.samplerate}")
            print(f"\tthissamplerate: {thissamplerate}")
            print(f"\targs.thestarttime: {args.thestarttime}")
            print(f"\tthisstartoffset: {thisstartoffset}")

        if args.debug:
            print(f"file {args.textfilenames[i]} colnames: {colnames}")

        if args.dotranspose:
            invecs = np.transpose(invecs)
        if args.debug:
            print("   ", invecs.shape[0], " columns")

        for j in range(0, invecs.shape[0]):
            if args.debug:
                print("appending vector number ", j)
            if dospectrum:
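                # spectra are computed on an even number of points, so trim
                # one sample from odd-length vectors first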
                if invecs.shape[1] % 2 == 1:
                    invec = invecs[j, :-1]
                else:
                    invec = invecs[j, :]
                if detrendorder > 0:
                    invec = tide_fit.detrend(invec, order=detrendorder, demean=True)
                elif demean:
                    invec = invec - np.mean(invec)

                if useHamming:
                    freqaxis, spectrum = tide_filt.spectrum(
                        tide_filt.hamming(len(invec)) * invec, Fs=thissamplerate, mode=specmode,
                    )
                else:
                    freqaxis, spectrum = tide_filt.spectrum(
                        invec, Fs=thissamplerate, mode=specmode
                    )
                if savespec:
                    tide_io.writenpvecs(
                        np.transpose(np.stack([freqaxis, spectrum], axis=1)), "thespectrum.txt",
                    )
                xvecs.append(freqaxis)
                yvecs.append(spectrum)
            else:
                yvecs.append(invecs[j] * 1.0)
                xvecs.append(
                    thisstartoffset + np.arange(0.0, len(yvecs[-1]), 1.0) / thissamplerate
                )
            if len(yvecs[-1]) < minlen:
                minlen = len(yvecs[-1])
            if not legendset:
                if invecs.shape[0] > 1:
                    if colnames is None:
                        if shortcolnames:
                            linelabels.append("column" + str(j).zfill(2))
                        else:
                            linelabels.append(thisfilename + "_column" + str(j).zfill(2))

                    else:
                        if shortcolnames:
                            linelabels.append(colnames[j])
                        else:
                            linelabels.append(thisfilename + "_" + colnames[j])
                else:
                    linelabels.append(thisfilename)
            else:
                linelabels.append(legends[i % len(legends)])
                """if invecs.shape[0] > 1:
                    linelabels.append(legends[i % len(legends)] + '_column' + str(j).zfill(2))
                else:
                    linelabels.append(legends[i % len(legends)])"""
            samplerates.append(thissamplerate + 0.0)
            if args.debug:
                print(
                    "timecourse:",
                    j,
                    ", len:",
                    len(xvecs[-1]),
                    ", timerange:",
                    xvecs[-1][0],
                    xvecs[-1][-1],
                )
            numvecs += 1

    thestartpoint = tide_util.valtoindex(xvecs[0], args.thestarttime)
    theendpoint = tide_util.valtoindex(xvecs[0], args.theendtime)
    args.thestarttime = xvecs[0][thestartpoint]
    args.theendtime = xvecs[0][theendpoint]
    if args.debug:
        print("full range (pts):", thestartpoint, theendpoint)
        print("full range (time):", args.thestarttime, args.theendtime)
    overallxmax = -1e38
    overallxmin = 1e38
    for thevec in xvecs:
        overallxmax = np.max([np.max(thevec), overallxmax])
        overallxmin = np.min([np.min(thevec), overallxmin])
    xrange = (np.max([overallxmin, args.thestarttime]), np.min([overallxmax, args.theendtime]))
    overallymax = -1e38
    overallymin = 1e38
    for thevec in yvecs:
        overallymax = np.max([np.max(thevec), overallymax])
        overallymin = np.min([np.min(thevec), overallymin])
    yrange = (overallymin, overallymax)
    if args.debug:
        print("xrange:", xrange)
        print("yrange:", yrange)
    if args.voffset < 0.0:
        args.voffset = yrange[1] - yrange[0]
    if args.debug:
        print("voffset:", args.voffset)
    if not separate:
        for i in range(0, numvecs):
            yvecs[i] += (numvecs - i - 1) * args.voffset
        overallymax = -1e38
        overallymin = 1e38
        for thevec in yvecs:
            overallymax = np.max([np.max(thevec), overallymax])
            overallymin = np.min([np.min(thevec), overallymin])
        yrange = (overallymin, overallymax)

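        # offset each trace in x and y to produce a waterfall-style display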
        if args.dowaterfall:
            xstep = (xrange[1] - xrange[0]) / numvecs
            ystep = yrange[1] - yrange[0]
            for i in range(numvecs):
                xvecs[i] = xvecs[i] + i * xstep
                yvecs[i] = 10.0 * yvecs[i] / ystep + i * ystep

    # now plot it out
    if separate:
        thexaxfontsize = 6 * args.fontscalefac
        theyaxfontsize = 6 * args.fontscalefac
        thexlabelfontsize = 6 * args.fontscalefac
        theylabelfontsize = 6 * args.fontscalefac
        thelegendfontsize = 5 * args.fontscalefac
        thetitlefontsize = 6 * args.fontscalefac
        thesuptitlefontsize = 10 * args.fontscalefac
    else:
        thexaxfontsize = 10 * args.fontscalefac
        theyaxfontsize = 10 * args.fontscalefac
        thexlabelfontsize = 10 * args.fontscalefac
        theylabelfontsize = 10 * args.fontscalefac
        thelegendfontsize = 8 * args.fontscalefac
        thetitlefontsize = 10 * args.fontscalefac
        thesuptitlefontsize = 10 * args.fontscalefac

    if len(colornames) > 0:
        colorlist = [colornames[i % len(colornames)] for i in range(numvecs)]
    else:
        colorlist = [cm.nipy_spectral(float(i) / numvecs) for i in range(numvecs)]

    fig = figure()
    if separate:
        if args.thetitle is not None:
            fig.suptitle(args.thetitle, fontsize=thesuptitlefontsize)
        if linky:
            axlist = fig.subplots(numvecs, sharex=True, sharey=True)[:]
        else:
            axlist = fig.subplots(numvecs, sharex=True, sharey=False)[:]
    else:
        ax = fig.add_subplot(1, 1, 1)
        if args.thetitle is not None:
            ax.set_title(args.thetitle, fontsize=thetitlefontsize)

    for i in range(0, numvecs):
        if separate:
            ax = axlist[i]
        ax.plot(
            xvecs[i],
            yvecs[i],
            color=colorlist[i],
            label=linelabels[i],
            linewidth=thelinewidth[i % numlinewidths],
        )
        if dolegend:
            ax.legend(fontsize=thelegendfontsize, loc=legendloc)
        ax.set_xlim(xrange)
        if linky:
            # print(yrange)
            ax.set_ylim(yrange)
        else:
            themax = np.max(yvecs[i])
            themin = np.min(yvecs[i])
            thediff = themax - themin
            # print(themin, themax, thediff)
            ax.set_ylim(top=(themax + thediff / 20.0), bottom=(themin - thediff / 20.0))
        if args.showxax:
            ax.tick_params(axis="x", labelsize=thexlabelfontsize, which="both")
        if args.showyax:
            ax.tick_params(axis="y", labelsize=theylabelfontsize, which="both")

        if separate:
            fig.subplots_adjust(hspace=0)
            setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)

        if dospectrum:
            if args.xlabel is None:
                args.xlabel = "Frequency (Hz)"
            if specmode == "power":
                if args.ylabel is None:
                    args.ylabel = "Signal power"
            else:
                if args.ylabel is None:
                    args.ylabel = "Signal phase"
        else:
            if args.xlabel is None:
                args.xlabel = "Time (s)"
        if args.showxax:
            ax.set_xlabel(args.xlabel, fontsize=thexlabelfontsize, fontweight="bold")
        else:
            ax.xaxis.set_visible(False)
        if args.showyax:
            ax.set_ylabel(args.ylabel, fontsize=theylabelfontsize, fontweight="bold")
        else:
            ax.yaxis.set_visible(False)

    # fig.tight_layout()

    if args.outputfile is None:
        show()
    else:
        savefig(args.outputfile, bbox_inches="tight", dpi=args.saveres)
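
The useHamming branch above tapers each timecourse before taking its spectrum. A rough numpy-only sketch of the same idea, assuming a simple one-sided magnitude spectrum (the exact scaling and mode handling inside tide_filt.spectrum may differ):

import numpy as np

def windowed_magnitude_spectrum(invec, Fs):
    # taper with a Hamming window to reduce spectral leakage,
    # then take the magnitude of the one-sided FFT
    tapered = np.hamming(len(invec)) * invec
    spectrum = np.abs(np.fft.rfft(tapered))
    freqaxis = np.fft.rfftfreq(len(invec), d=1.0 / Fs)
    return freqaxis, spectrum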
Esempio n. 11
0
def makeandsavehistogram(
    indata,
    histlen,
    endtrim,
    outname,
    binsize=None,
    displaytitle="histogram",
    displayplots=False,
    refine=False,
    therange=None,
    normalize=False,
    dictvarname=None,
    thedict=None,
    saveasbids=False,
    append=False,
    debug=False,
):
    """

    Parameters
    ----------
    indata
    histlen
    endtrim
    outname
    displaytitle
    displayplots
    refine
    therange
    normalize
    dictvarname
    thedict

    Returns
    -------

    """
    thehist, peakheight, peakloc, peakwidth, centerofmass = makehistogram(
        indata, histlen, binsize=binsize, therange=therange, refine=refine
    )
    thestore = np.zeros((2, len(thehist[0])), dtype="float64")
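    # row 0: bin centers (midpoints of adjacent histogram edges); row 1: counts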
    thestore[0, :] = (thehist[1][1:] + thehist[1][0:-1]) / 2.0
    thestore[1, :] = thehist[0][-histlen:]
    if normalize:
        totalval = np.sum(thestore[1, :])
        if totalval != 0.0:
            thestore[1, :] /= totalval
    if dictvarname is None:
        varroot = outname
    else:
        varroot = dictvarname
    if thedict is None:
        tide_io.writenpvecs(np.array([centerofmass]), outname + "_centerofmass.txt")
        tide_io.writenpvecs(np.array([peakloc]), outname + "_peak.txt")
    else:
        thedict[varroot + "_centerofmass.txt"] = centerofmass
        thedict[varroot + "_peak.txt"] = peakloc
    if saveasbids:
        tide_io.writebidstsv(
            outname,
            np.transpose(thestore[1, :]),
            1.0 / (thestore[0, 1] - thestore[0, 0]),
            starttime=thestore[0, 0],
            columns=[varroot],
            append=append,
            debug=debug,
        )
    else:
        tide_io.writenpvecs(thestore, outname + ".txt")
    if displayplots:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title(displaytitle)
        plt.plot(thestore[0, : (-1 - endtrim)], thestore[1, : (-1 - endtrim)])
        plt.show()
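
The bin-center computation in makeandsavehistogram is just the midpoint of each pair of adjacent bin edges returned by np.histogram; a standalone sketch with made-up data:

import numpy as np

data = np.random.default_rng(0).standard_normal(1000)
counts, edges = np.histogram(data, bins=100)
bincenters = (edges[1:] + edges[:-1]) / 2.0  # np.histogram returns len(counts) + 1 edges
assert len(bincenters) == len(counts)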