def upsample(inputdata, Fs_init, Fs_higher, method="univariate", intfac=False, debug=False):
    starttime = time.time()
    if Fs_higher <= Fs_init:
        print("upsample: target frequency must be higher than initial frequency")
        sys.exit()

    # upsample
    orig_x = np.linspace(0.0, (1.0 / Fs_init) * len(inputdata), num=len(inputdata), endpoint=False)
    endpoint = orig_x[-1] - orig_x[0]
    ts_higher = 1.0 / Fs_higher
    if intfac:
        numresamppts = int(Fs_higher // Fs_init) * len(inputdata)
    else:
        numresamppts = int(endpoint // ts_higher + 1)
    upsampled_x = np.arange(0.0, ts_higher * numresamppts, ts_higher)
    upsampled_y = doresample(orig_x, inputdata, upsampled_x, method=method)

    # filter out everything above the original Nyquist frequency to suppress upsampling artifacts
    initfilter = tide_filt.NoncausalFilter(
        filtertype="arb", transferfunc="trapezoidal", debug=debug
    )
    stopfreq = np.min([1.1 * Fs_init / 2.0, Fs_higher / 2.0])
    initfilter.setfreqs(0.0, 0.0, Fs_init / 2.0, stopfreq)
    upsampled_y = initfilter.apply(Fs_higher, upsampled_y)
    if debug:
        print("upsampling took", time.time() - starttime, "seconds")
    return upsampled_y
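# Usage sketch for upsample() (a minimal example, assuming numpy is imported as np
# and this function is in scope; the 2 Hz sine test signal is illustrative only):
#
#     Fs_init, Fs_higher = 10.0, 40.0
#     t = np.arange(0.0, 30.0, 1.0 / Fs_init)
#     lowrate = np.sin(2.0 * np.pi * 2.0 * t)
#     highrate = upsample(lowrate, Fs_init, Fs_higher, intfac=True)
#     # with intfac=True, len(highrate) should be 4 * len(lowrate)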
def motionregress(
    themotionfilename,
    thedataarray,
    tr,
    orthogonalize=True,
    motstart=0,
    motend=-1,
    motionhp=None,
    motionlp=None,
    position=True,
    deriv=True,
    derivdelayed=False,
    debug=False,
):
    print("regressing out motion")
    motionregressors, motionregressorlabels = tide_io.calcmotregressors(
        tide_io.readmotion(themotionfilename),
        position=position,
        deriv=deriv,
        derivdelayed=derivdelayed,
    )
    if motend == -1:
        motionregressors = motionregressors[:, motstart:]
    else:
        motionregressors = motionregressors[:, motstart:motend]
    if (motionlp is not None) or (motionhp is not None):
        mothpfilt = tide_filt.NoncausalFilter(filtertype="arb", transferfunc="trapezoidal")
        if motionlp is None:
            motionlp = 0.5 / tr
        else:
            motionlp = np.min([0.5 / tr, motionlp])
        if motionhp is None:
            motionhp = 0.0
        mothpfilt.setfreqs(0.9 * motionhp, motionhp, motionlp, np.min([0.5 / tr, motionlp * 1.1]))
        for i in range(motionregressors.shape[0]):
            motionregressors[i, :] = mothpfilt.apply(1.0 / tr, motionregressors[i, :])
    if orthogonalize:
        motionregressors = tide_fit.gram_schmidt(motionregressors)
        initregressors = len(motionregressorlabels)
        motionregressorlabels = []
        for theregressor in range(motionregressors.shape[0]):
            motionregressorlabels.append("orthogmotion_{:02d}".format(theregressor))
        print(
            "After orthogonalization, {0} of {1} regressors remain.".format(
                len(motionregressorlabels), initregressors
            )
        )

    print("start motion filtering")
    filtereddata = confoundglm(thedataarray, motionregressors, debug=debug)
    print()
    print("motion filtering complete")

    return motionregressors, motionregressorlabels, filtereddata
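# Usage sketch for motionregress() (a minimal example; "motion.par" is a hypothetical
# motion parameter file readable by tide_io.readmotion, and fmridata is assumed to be
# a voxels-by-timepoints numpy array - neither is defined in this listing):
#
#     regressors, labels, cleaned = motionregress(
#         "motion.par", fmridata, tr=0.72, motionhp=0.01, motionlp=0.1
#     )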
def __init__(
    self,
    windowfunc="hamming",
    norm=True,
    madnorm=False,
    smoothingtime=-1.0,
    bins=20,
    sigma=0.25,
    *args,
    **kwargs,
):
    self.windowfunc = windowfunc
    self.norm = norm
    self.madnorm = madnorm
    self.bins = bins
    self.sigma = sigma
    self.smoothingtime = smoothingtime
    self.smoothingfilter = tide_filt.NoncausalFilter(filtertype="arb")
    if self.smoothingtime > 0.0:
        self.smoothingfilter.setfreqs(
            0.0, 0.0, 1.0 / self.smoothingtime, 1.0 / self.smoothingtime
        )
    super(MutualInformationator, self).__init__(*args, **kwargs)
def envdetect(Fs, inputdata, cutoff=0.25):
    """
    Parameters
    ----------
    Fs : float
        Sample frequency in Hz.
    inputdata : float array
        Data to be envelope detected
    cutoff : float
        Highest possible modulation frequency

    Returns
    -------
    envelope : float array
        The envelope function

    """
    demeaned = inputdata - np.mean(inputdata)
    sigabs = abs(demeaned)
    theenvbpf = tide_filt.NoncausalFilter(filtertype="arb")
    theenvbpf.setfreqs(0.0, 0.0, cutoff, 1.1 * cutoff)
    return theenvbpf.apply(Fs, sigabs)
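# Usage sketch for envdetect() (a minimal example, assuming numpy is imported as np;
# the amplitude-modulated test signal is illustrative only):
#
#     Fs = 25.0
#     t = np.arange(0.0, 120.0, 1.0 / Fs)
#     carrier = np.sin(2.0 * np.pi * 1.0 * t)                    # 1 Hz carrier
#     modulation = 1.0 + 0.5 * np.sin(2.0 * np.pi * 0.05 * t)    # slow 0.05 Hz envelope
#     envelope = envdetect(Fs, carrier * modulation, cutoff=0.1)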
def test_nullsimfunc(debug=False, display=False):
    # make the lfo filter
    lfofilter = tide_filt.NoncausalFilter(filtertype="lfo")

    # make the starting regressor
    timestep = 1.5
    Fs = 1.0 / timestep
    # sourcelen = 1200
    # sourcedata = lfofilter.apply(Fs, np.random.rand(sourcelen))
    sourcedata = tide_io.readvecs(os.path.join(get_test_data_path(), "fmri_globalmean.txt"))[0]
    sourcelen = len(sourcedata)
    numpasses = 1

    if display:
        plt.figure()
        plt.plot(sourcedata)
        plt.show()

    thexcorr = tide_corr.fastcorrelate(sourcedata, sourcedata)
    xcorrlen = len(thexcorr)
    xcorr_x = (
        np.linspace(0.0, xcorrlen, xcorrlen, endpoint=False) * timestep
        - (xcorrlen * timestep) / 2.0
        + timestep / 2.0
    )

    if display:
        plt.figure()
        plt.plot(xcorr_x, thexcorr)
        plt.show()

    corrzero = xcorrlen // 2
    lagmin = -10
    lagmax = 10
    lagmininpts = int((-lagmin / timestep) - 0.5)
    lagmaxinpts = int((lagmax / timestep) + 0.5)

    searchstart = int(np.round(corrzero + lagmin / timestep))
    searchend = int(np.round(corrzero + lagmax / timestep))

    optiondict = {
        "numestreps": 10000,
        "showprogressbar": debug,
        "detrendorder": 3,
        "windowfunc": "hamming",
        "corrweighting": "None",
        "nprocs": 1,
        "widthlimit": 1000.0,
        "bipolar": False,
        "fixdelay": False,
        "peakfittype": "gauss",
        "lagmin": lagmin,
        "lagmax": lagmax,
        "absminsigma": 0.25,
        "absmaxsigma": 25.0,
        "edgebufferfrac": 0.0,
        "lthreshval": 0.0,
        "uthreshval": 1.0,
        "debug": False,
        "enforcethresh": True,
        "lagmod": 1000.0,
        "searchfrac": 0.5,
        "permutationmethod": "shuffle",
        "hardlimit": True,
    }

    theprefilter = tide_filt.NoncausalFilter("lfo")
    theCorrelator = tide_classes.Correlator(
        Fs=Fs,
        ncprefilter=theprefilter,
        detrendorder=optiondict["detrendorder"],
        windowfunc=optiondict["windowfunc"],
        corrweighting=optiondict["corrweighting"],
    )
    thefitter = tide_classes.SimilarityFunctionFitter(
        lagmod=optiondict["lagmod"],
        lthreshval=optiondict["lthreshval"],
        uthreshval=optiondict["uthreshval"],
        bipolar=optiondict["bipolar"],
        lagmin=optiondict["lagmin"],
        lagmax=optiondict["lagmax"],
        absmaxsigma=optiondict["absmaxsigma"],
        absminsigma=optiondict["absminsigma"],
        debug=optiondict["debug"],
        peakfittype=optiondict["peakfittype"],
        searchfrac=optiondict["searchfrac"],
        enforcethresh=optiondict["enforcethresh"],
        hardlimit=optiondict["hardlimit"],
    )

    if debug:
        print(optiondict)

    theCorrelator.setlimits(lagmininpts, lagmaxinpts)
    theCorrelator.setreftc(sourcedata)
    dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
    thefitter.setcorrtimeaxis(trimmedcorrscale)
    histograms = []
    for thenprocs in [1, -1]:
        for i in range(numpasses):
            corrlist = tide_nullsimfunc.getNullDistributionDatax(
                sourcedata,
                Fs,
                theCorrelator,
                thefitter,
                despeckle_thresh=5.0,
                fixdelay=False,
                fixeddelayvalue=0.0,
                numestreps=optiondict["numestreps"],
                nprocs=thenprocs,
                showprogressbar=optiondict["showprogressbar"],
                chunksize=1000,
                permutationmethod=optiondict["permutationmethod"],
            )
            tide_io.writenpvecs(
                corrlist, os.path.join(get_test_temp_path(), "corrdistdata.txt")
            )

            # calculate percentiles for the crosscorrelation from the distribution data
            histlen = 250
            thepercentiles = [0.95, 0.99, 0.995]
            pcts, pcts_fit, histfit = tide_stats.sigFromDistributionData(
                corrlist, histlen, thepercentiles
            )
            if debug:
                tide_stats.printthresholds(
                    pcts,
                    thepercentiles,
                    "Crosscorrelation significance thresholds from data:",
                )
                tide_stats.printthresholds(
                    pcts_fit,
                    thepercentiles,
                    "Crosscorrelation significance thresholds from fit:",
                )

            (
                thehist,
                peakheight,
                peakloc,
                peakwidth,
                centerofmass,
            ) = tide_stats.makehistogram(np.abs(corrlist), histlen, therange=[0.0, 1.0])
            histograms.append(thehist)
            thestore = np.zeros((2, len(thehist[0])), dtype="float64")
            thestore[0, :] = (thehist[1][1:] + thehist[1][0:-1]) / 2.0
            thestore[1, :] = thehist[0][-histlen:]
            if display:
                plt.figure()
                plt.plot(thestore[0, :], thestore[1, :])
                plt.show()
            # tide_stats.makeandsavehistogram(corrlist, histlen, 0,
            #     os.path.join(get_test_temp_path(), 'correlationhist'),
            #     displaytitle='Null correlation histogram',
            #     displayplots=display, refine=False)
    assert True
def test_calcsimfunc(debug=False, display=False):
    # make the lfo filter
    lfofilter = tide_filt.NoncausalFilter(filtertype="lfo")

    # make some data
    oversampfactor = 2
    numvoxels = 100
    numtimepoints = 500
    tr = 0.72
    Fs = 1.0 / tr
    init_fmri_x = np.linspace(0.0, numtimepoints, numtimepoints, endpoint=False) * tr
    oversampfreq = oversampfactor * Fs
    os_fmri_x = np.linspace(
        0.0, numtimepoints * oversampfactor, numtimepoints * oversampfactor
    ) * (1.0 / oversampfreq)

    theinputdata = np.zeros((numvoxels, numtimepoints), dtype=np.float64)
    meanval = np.zeros((numvoxels), dtype=np.float64)

    testfreq = 0.075
    msethresh = 1e-3

    # make the starting regressor
    sourcedata = np.sin(2.0 * np.pi * testfreq * os_fmri_x)
    numpasses = 1

    # make the timeshifted data
    shiftstart = -5.0
    shiftend = 5.0
    voxelshifts = np.linspace(shiftstart, shiftend, numvoxels, endpoint=False)
    for i in range(numvoxels):
        theinputdata[i, :] = np.sin(2.0 * np.pi * testfreq * (init_fmri_x - voxelshifts[i]))

    if display:
        plt.figure()
        plt.plot(sourcedata)
        plt.show()

    genlagtc = tide_resample.FastResampler(os_fmri_x, sourcedata)

    thexcorr = tide_corr.fastcorrelate(sourcedata, sourcedata)
    xcorrlen = len(thexcorr)
    xcorr_x = (
        np.linspace(0.0, xcorrlen, xcorrlen, endpoint=False) * tr
        - (xcorrlen * tr) / 2.0
        + tr / 2.0
    )

    if display:
        plt.figure()
        plt.plot(xcorr_x, thexcorr)
        plt.show()

    corrzero = xcorrlen // 2
    lagmin = -10.0
    lagmax = 10.0
    lagmininpts = int((-lagmin * oversampfreq) - 0.5)
    lagmaxinpts = int((lagmax * oversampfreq) + 0.5)

    searchstart = int(np.round(corrzero + lagmin / tr))
    searchend = int(np.round(corrzero + lagmax / tr))
    numcorrpoints = lagmaxinpts + lagmininpts

    corrout = np.zeros((numvoxels, numcorrpoints), dtype=np.float64)
    lagmask = np.zeros((numvoxels), dtype=np.float64)
    failimage = np.zeros((numvoxels), dtype=np.float64)
    lagtimes = np.zeros((numvoxels), dtype=np.float64)
    lagstrengths = np.zeros((numvoxels), dtype=np.float64)
    lagsigma = np.zeros((numvoxels), dtype=np.float64)
    gaussout = np.zeros((numvoxels, numcorrpoints), dtype=np.float64)
    windowout = np.zeros((numvoxels, numcorrpoints), dtype=np.float64)
    R2 = np.zeros((numvoxels), dtype=np.float64)
    lagtc = np.zeros((numvoxels, numtimepoints), dtype=np.float64)

    optiondict = {
        "numestreps": 10000,
        "interptype": "univariate",
        "showprogressbar": debug,
        "detrendorder": 3,
        "windowfunc": "hamming",
        "corrweighting": "None",
        "nprocs": 1,
        "widthlimit": 1000.0,
        "bipolar": False,
        "fixdelay": False,
        "peakfittype": "gauss",
        "lagmin": lagmin,
        "lagmax": lagmax,
        "absminsigma": 0.25,
        "absmaxsigma": 25.0,
        "edgebufferfrac": 0.0,
        "lthreshval": 0.0,
        "uthreshval": 1.1,
        "debug": False,
        "enforcethresh": True,
        "lagmod": 1000.0,
        "searchfrac": 0.5,
        "mp_chunksize": 1000,
        "oversampfactor": oversampfactor,
        "despeckle_thresh": 5.0,
        "zerooutbadfit": False,
        "permutationmethod": "shuffle",
        "hardlimit": True,
    }

    theprefilter = tide_filt.NoncausalFilter("lfo")

    theCorrelator = tide_classes.Correlator(
        Fs=oversampfreq,
        ncprefilter=theprefilter,
        detrendorder=optiondict["detrendorder"],
        windowfunc=optiondict["windowfunc"],
        corrweighting=optiondict["corrweighting"],
    )

    thefitter = tide_classes.SimilarityFunctionFitter(
        lagmod=optiondict["lagmod"],
        lthreshval=optiondict["lthreshval"],
        uthreshval=optiondict["uthreshval"],
        bipolar=optiondict["bipolar"],
        lagmin=optiondict["lagmin"],
        lagmax=optiondict["lagmax"],
        absmaxsigma=optiondict["absmaxsigma"],
        absminsigma=optiondict["absminsigma"],
        debug=optiondict["debug"],
        peakfittype=optiondict["peakfittype"],
        zerooutbadfit=optiondict["zerooutbadfit"],
        searchfrac=optiondict["searchfrac"],
        enforcethresh=optiondict["enforcethresh"],
        hardlimit=optiondict["hardlimit"],
    )

    if debug:
        print(optiondict)

    theCorrelator.setlimits(lagmininpts, lagmaxinpts)
    theCorrelator.setreftc(sourcedata)
    dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
    thefitter.setcorrtimeaxis(trimmedcorrscale)

    for thenprocs in [1, -1]:
        for i in range(numpasses):
            (
                voxelsprocessed_cp,
                theglobalmaxlist,
                trimmedcorrscale,
            ) = tide_calcsimfunc.correlationpass(
                theinputdata,
                sourcedata,
                theCorrelator,
                init_fmri_x,
                os_fmri_x,
                lagmininpts,
                lagmaxinpts,
                corrout,
                meanval,
                nprocs=thenprocs,
                oversampfactor=optiondict["oversampfactor"],
                interptype=optiondict["interptype"],
                showprogressbar=optiondict["showprogressbar"],
                chunksize=optiondict["mp_chunksize"],
            )

            if display:
                plt.figure()
                plt.plot(trimmedcorrscale, corrout[numvoxels // 2, :], "k")
                plt.show()

            voxelsprocessed_fc = tide_simfuncfit.fitcorr(
                genlagtc,
                init_fmri_x,
                lagtc,
                trimmedcorrscale,
                thefitter,
                corrout,
                lagmask,
                failimage,
                lagtimes,
                lagstrengths,
                lagsigma,
                gaussout,
                windowout,
                R2,
                nprocs=optiondict["nprocs"],
                fixdelay=optiondict["fixdelay"],
                showprogressbar=optiondict["showprogressbar"],
                chunksize=optiondict["mp_chunksize"],
                despeckle_thresh=optiondict["despeckle_thresh"],
            )

            if display:
                plt.figure()
                plt.plot(voxelshifts, "k")
                plt.plot(lagtimes, "r")
                plt.show()

            if debug:
                for i in range(numvoxels):
                    print(
                        voxelshifts[i],
                        lagtimes[i],
                        lagstrengths[i],
                        lagsigma[i],
                        failimage[i],
                    )

            assert mse(voxelshifts, lagtimes) < msethresh
def dotwostepresample(
    orig_x,
    orig_y,
    intermed_freq,
    final_freq,
    method="univariate",
    antialias=True,
    debug=False,
):
    """
    Resample a timecourse in two steps: upsample to an intermediate frequency,
    optionally apply an antialiasing filter, then downsample to the final frequency.

    Parameters
    ----------
    orig_x
    orig_y
    intermed_freq
    final_freq
    method
    antialias
    debug

    Returns
    -------
    resampled_y

    """
    if intermed_freq <= final_freq:
        print("intermediate frequency must be higher than final frequency")
        sys.exit()

    # upsample
    starttime = time.time()
    endpoint = orig_x[-1] - orig_x[0]
    init_freq = len(orig_x) / endpoint
    intermed_ts = 1.0 / intermed_freq
    numresamppts = int(endpoint // intermed_ts + 1)
    intermed_x = intermed_ts * np.linspace(0.0, 1.0 * numresamppts, numresamppts, endpoint=False)
    intermed_y = doresample(orig_x, orig_y, intermed_x, method=method)
    if debug:
        print(
            "init_freq, intermed_freq, final_freq:",
            init_freq,
            intermed_freq,
            final_freq,
        )
        print("intermed_ts, numresamppts:", intermed_ts, numresamppts)
        print("upsampling took", time.time() - starttime, "seconds")

    # antialias and ringstop filter
    if antialias:
        starttime = time.time()
        aafilterfreq = np.min([final_freq, init_freq]) / 2.0
        aafilter = tide_filt.NoncausalFilter(
            filtertype="arb", transferfunc="trapezoidal", debug=debug
        )
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        antialias_y = aafilter.apply(intermed_freq, intermed_y)
        if debug:
            print("antialiasing took", time.time() - starttime, "seconds")
    else:
        antialias_y = intermed_y

    # downsample
    starttime = time.time()
    final_ts = 1.0 / final_freq
    numresamppts = int(np.ceil(endpoint / final_ts))
    # final_x = np.arange(0.0, final_ts * numresamppts, final_ts)
    final_x = final_ts * np.linspace(0.0, 1.0 * numresamppts, numresamppts, endpoint=False)
    resampled_y = doresample(intermed_x, antialias_y, final_x, method=method)
    if debug:
        print("downsampling took", time.time() - starttime, "seconds")
    return resampled_y
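# Usage sketch for dotwostepresample() (a minimal example, assuming numpy is imported
# as np; the frequencies and test signal are illustrative only):
#
#     orig_x = np.arange(0.0, 300.0, 0.5)                       # sampled at 2 Hz
#     orig_y = np.sin(2.0 * np.pi * 0.05 * orig_x)
#     resampled_y = dotwostepresample(orig_x, orig_y, intermed_freq=10.0, final_freq=0.5)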
def doresample(orig_x, orig_y, new_x, method="cubic", padlen=0, antialias=False, debug=False):
    """
    Resample data from one spacing to another.  By default, does not apply any antialiasing filter.

    Parameters
    ----------
    orig_x
    orig_y
    new_x
    method
    padlen
    antialias
    debug

    Returns
    -------
    The input data resampled at the locations in new_x (None if method is invalid).

    """
    tstep = orig_x[1] - orig_x[0]
    if padlen > 0:
        rawxpad = np.linspace(0.0, padlen * tstep, num=padlen, endpoint=False)
        frontpad = rawxpad + orig_x[0] - padlen * tstep
        backpad = rawxpad + orig_x[-1] + tstep
        pad_x = np.concatenate((frontpad, orig_x, backpad))
        pad_y = tide_filt.padvec(orig_y, padlen=padlen)
    else:
        pad_x = orig_x
        pad_y = orig_y
    if debug:
        print("padlen=", padlen)
        print("tstep=", tstep)
        print("lens:", len(pad_x), len(pad_y))
        print(pad_x)
        print(pad_y)
        fig = pl.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Original and padded vector")
        pl.plot(orig_x, orig_y + 1.0, pad_x, pad_y)
        pl.show()

    # antialias and ringstop filter
    init_freq = len(pad_x) / (pad_x[-1] - pad_x[0])
    final_freq = len(new_x) / (new_x[-1] - new_x[0])
    if antialias and (init_freq > final_freq):
        aafilterfreq = final_freq / 2.0
        aafilter = tide_filt.NoncausalFilter(filtertype="arb", transferfunc="trapezoidal")
        aafilter.setfreqs(0.0, 0.0, 0.95 * aafilterfreq, aafilterfreq)
        pad_y = aafilter.apply(init_freq, pad_y)

    if method == "cubic":
        cj = signal.cspline1d(pad_y)
        # return tide_filt.unpadvec(
        #     np.float64(signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.cspline1d_eval(cj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "quadratic":
        qj = signal.qspline1d(pad_y)
        # return tide_filt.unpadvec(
        #     np.float64(signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])), padlen=padlen)
        return signal.qspline1d_eval(qj, new_x, dx=(orig_x[1] - orig_x[0]), x0=orig_x[0])
    elif method == "univariate":
        interpolator = sp.interpolate.UnivariateSpline(pad_x, pad_y, k=3, s=0)  # s=0 interpolates
        # return tide_filt.unpadvec(np.float64(interpolator(new_x)), padlen=padlen)
        return np.float64(interpolator(new_x))
    else:
        print("invalid interpolation method")
        return None
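# Usage sketch for doresample() (a minimal example, assuming numpy is imported as np;
# the grids and test signal are illustrative only):
#
#     orig_x = np.arange(0.0, 100.0, 0.5)
#     orig_y = np.cos(2.0 * np.pi * 0.03 * orig_x)
#     new_x = np.arange(0.0, 100.0, 0.1)
#     new_y = doresample(orig_x, orig_y, new_x, method="univariate", padlen=20)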
def test_delayestimation(display=False, debug=False):
    # set the number of MKL threads to use
    if mklexists:
        print("disabling MKL")
        mkl.set_num_threads(1)

    # set parameters
    Fs = 10.0
    numpoints = 5000
    numlocs = 21
    refnum = int(numlocs // 2)
    timestep = 0.228764
    oversampfac = 2
    detrendorder = 1
    oversampfreq = Fs * oversampfac
    corrtr = 1.0 / oversampfreq
    smoothingtime = 1.0
    bipolar = False
    interptype = "univariate"
    lagmod = 1000.0
    lagmin = -20.0
    lagmax = 20.0
    lagmininpts = int((-lagmin / corrtr) - 0.5)
    lagmaxinpts = int((lagmax / corrtr) + 0.5)
    peakfittype = "gauss"
    corrweighting = "None"
    similaritymetric = "hybrid"
    windowfunc = "hamming"
    chunksize = 5
    pedestal = 100.0

    # set up the filter
    theprefilter = tide_filt.NoncausalFilter("arb", transferfunc="brickwall", debug=False)
    theprefilter.setfreqs(0.009, 0.01, 0.15, 0.16)

    # construct the various test waveforms
    timepoints = np.linspace(0.0, numpoints / Fs, num=numpoints, endpoint=False)
    oversamptimepoints = np.linspace(0.0, numpoints / Fs, num=oversampfac * numpoints, endpoint=False)
    waveforms = np.zeros((numlocs, numpoints), dtype=np.float64)
    paramlist = [
        [0.314, 0.055457, 0.0],
        [-0.723, 0.08347856, np.pi],
        [-0.834, 0.1102947, 0.0],
        [1.0, 0.13425, 0.5],
    ]
    offsets = np.zeros(numlocs, dtype=np.float64)
    amplitudes = np.ones(numlocs, dtype=np.float64)
    for i in range(numlocs):
        offsets[i] = timestep * (i - refnum)
        waveforms[i, :] = multisine(timepoints - offsets[i], paramlist) + pedestal
    if display:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        for i in range(numlocs):
            ax.plot(timepoints, waveforms[i, :])
        plt.show()

    threshval = pedestal / 4.0
    waveforms = numpy2shared(waveforms, np.float64)

    referencetc = tide_resample.doresample(
        timepoints, waveforms[refnum, :], oversamptimepoints, method=interptype
    )
    referencetc = theprefilter.apply(oversampfreq, referencetc)
    referencetc = tide_math.corrnormalize(referencetc, detrendorder=detrendorder, windowfunc=windowfunc)

    # set up theCorrelator
    if debug:
        print("\n\nsetting up theCorrelator")
    theCorrelator = tide_classes.Correlator(
        Fs=oversampfreq,
        ncprefilter=theprefilter,
        detrendorder=detrendorder,
        windowfunc=windowfunc,
        corrweighting=corrweighting,
        debug=True,
    )
    theCorrelator.setreftc(np.zeros((oversampfac * numpoints), dtype=np.float64))
    theCorrelator.setlimits(lagmininpts, lagmaxinpts)
    dummy, trimmedcorrscale, dummy = theCorrelator.getfunction()
    corroutlen = np.shape(trimmedcorrscale)[0]
    internalvalidcorrshape = (numlocs, corroutlen)
    corrout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
    meanval, dummy, dummy = allocshared((numlocs), np.float64)
    if debug:
        print("corrout shape:", corrout.shape)
        print("theCorrelator: corroutlen=", corroutlen)

    # set up theMutualInformationator
    if debug:
        print("\n\nsetting up theMutualInformationator")
    theMutualInformationator = tide_classes.MutualInformationator(
        Fs=oversampfreq,
        smoothingtime=smoothingtime,
        ncprefilter=theprefilter,
        detrendorder=detrendorder,
        windowfunc=windowfunc,
        madnorm=False,
        lagmininpts=lagmininpts,
        lagmaxinpts=lagmaxinpts,
        debug=False,
    )
    theMutualInformationator.setreftc(np.zeros((oversampfac * numpoints), dtype=np.float64))
    theMutualInformationator.setlimits(lagmininpts, lagmaxinpts)

    # set up thefitter
    if debug:
        print("\n\nsetting up thefitter")
    thefitter = tide_classes.SimilarityFunctionFitter(
        lagmod=lagmod,
        lthreshval=0.0,
        uthreshval=1.0,
        bipolar=bipolar,
        lagmin=lagmin,
        lagmax=lagmax,
        absmaxsigma=10000.0,
        absminsigma=0.01,
        debug=False,
        peakfittype=peakfittype,
    )

    lagtc, dummy, dummy = allocshared(waveforms.shape, np.float64)
    fitmask, dummy, dummy = allocshared((numlocs), "uint16")
    failreason, dummy, dummy = allocshared((numlocs), "uint32")
    lagtimes, dummy, dummy = allocshared((numlocs), np.float64)
    lagstrengths, dummy, dummy = allocshared((numlocs), np.float64)
    lagsigma, dummy, dummy = allocshared((numlocs), np.float64)
    gaussout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
    windowout, dummy, dummy = allocshared(internalvalidcorrshape, np.float64)
    rvalue, dummy, dummy = allocshared((numlocs), np.float64)
    r2value, dummy, dummy = allocshared((numlocs), np.float64)
    fitcoff, dummy, dummy = allocshared((numlocs), np.float64)
    fitNorm, dummy, dummy = allocshared((numlocs), np.float64)
    R2, dummy, dummy = allocshared((numlocs), np.float64)
    movingsignal, dummy, dummy = allocshared(waveforms.shape, np.float64)
    filtereddata, dummy, dummy = allocshared(waveforms.shape, np.float64)

    for nprocs in [4, 1]:
        # call correlationpass
        if debug:
            print("\n\ncalling correlationpass")
            print("waveforms shape:", waveforms.shape)
        (
            voxelsprocessed_cp,
            theglobalmaxlist,
            trimmedcorrscale,
        ) = tide_calcsimfunc.correlationpass(
            waveforms[:, :],
            referencetc,
            theCorrelator,
            timepoints,
            oversamptimepoints,
            lagmininpts,
            lagmaxinpts,
            corrout,
            meanval,
            nprocs=nprocs,
            alwaysmultiproc=False,
            oversampfactor=oversampfac,
            interptype=interptype,
            showprogressbar=False,
            chunksize=chunksize,
        )
        if debug:
            print(voxelsprocessed_cp, len(theglobalmaxlist), len(trimmedcorrscale))
        if display:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            for i in range(numlocs):
                ax.plot(trimmedcorrscale, corrout[i, :])
            plt.show()

        # call peakeval
        if debug:
            print("\n\ncalling peakeval")
        voxelsprocessed_pe, thepeakdict = tide_peakeval.peakevalpass(
            waveforms[:, :],
            referencetc,
            timepoints,
            oversamptimepoints,
            theMutualInformationator,
            trimmedcorrscale,
            corrout,
            nprocs=nprocs,
            alwaysmultiproc=False,
            bipolar=bipolar,
            oversampfactor=oversampfac,
            interptype=interptype,
            showprogressbar=False,
            chunksize=chunksize,
        )
        if debug:
            for key in thepeakdict:
                print(key, thepeakdict[key])

        # call thefitter
        if debug:
            print("\n\ncalling fitter")
        thefitter.setfunctype(similaritymetric)
        thefitter.setcorrtimeaxis(trimmedcorrscale)
        genlagtc = tide_resample.FastResampler(timepoints, waveforms[refnum, :])

        if display:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
        if nprocs == 1:
            proctype = "singleproc"
        else:
            proctype = "multiproc"
        for peakfittype in ["fastgauss", "quad", "fastquad", "gauss"]:
            thefitter.setpeakfittype(peakfittype)
            voxelsprocessed_fc = tide_simfuncfit.fitcorr(
                genlagtc,
                timepoints,
                lagtc,
                trimmedcorrscale,
                thefitter,
                corrout,
                fitmask,
                failreason,
                lagtimes,
                lagstrengths,
                lagsigma,
                gaussout,
                windowout,
                R2,
                peakdict=thepeakdict,
                nprocs=nprocs,
                alwaysmultiproc=False,
                fixdelay=None,
                showprogressbar=False,
                chunksize=chunksize,
                despeckle_thresh=100.0,
                initiallags=None,
            )
            if debug:
                print(voxelsprocessed_fc)

            if debug:
                print("\npeakfittype:", peakfittype)
                for i in range(numlocs):
                    print(
                        "location",
                        i,
                        ":",
                        offsets[i],
                        lagtimes[i],
                        lagtimes[i] - offsets[i],
                        lagstrengths[i],
                        lagsigma[i],
                    )
            if display:
                ax.plot(offsets, lagtimes, label=peakfittype)

            if checkfits(lagtimes, offsets, tolerance=0.01):
                print(proctype, peakfittype, " lagtime: pass")
                assert True
            else:
                print(proctype, peakfittype, " lagtime: fail")
                assert False
            if checkfits(lagstrengths, amplitudes, tolerance=0.05):
                print(proctype, peakfittype, " lagstrength: pass")
                assert True
            else:
                print(proctype, peakfittype, " lagstrength: fail")
                assert False

        if display:
            ax.legend()
            plt.show()

    filteredwaveforms, dummy, dummy = allocshared(waveforms.shape, np.float64)
    for i in range(numlocs):
        filteredwaveforms[i, :] = theprefilter.apply(Fs, waveforms[i, :])

    for nprocs in [4, 1]:
        voxelsprocessed_glm = tide_glmpass.glmpass(
            numlocs,
            waveforms[:, :],
            threshval,
            lagtc,
            meanval,
            rvalue,
            r2value,
            fitcoff,
            fitNorm,
            movingsignal,
            filtereddata,
            nprocs=nprocs,
            alwaysmultiproc=False,
            showprogressbar=False,
            mp_chunksize=chunksize,
        )
        if nprocs == 1:
            proctype = "singleproc"
        else:
            proctype = "multiproc"
        diffsignal = filtereddata
        # plotting is guarded by the display flag, consistent with the other plots in this test
        if display:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            # ax.plot(timepoints, filtereddata[refnum, :], label='filtereddata')
            ax.plot(oversamptimepoints, referencetc, label="referencetc")
            ax.plot(timepoints, movingsignal[refnum, :], label="movingsignal")
            ax.legend()
            plt.show()

        print(proctype, "glmpass", np.mean(diffsignal), np.max(np.fabs(diffsignal)))
def postprocessfilteropts(args, debug=False):
    # configure the filter
    # make sure the transfer function type, filter order, and pad time are set, using defaults if absent
    try:
        thetype = args.filtertype
    except AttributeError:
        args.filtertype = "trapezoidal"
    try:
        theorder = args.filtorder
    except AttributeError:
        args.filtorder = DEFAULT_FILTER_ORDER
    try:
        thepadseconds = args.padseconds
    except AttributeError:
        args.padseconds = DEFAULT_PAD_SECONDS

    # if passvec, or passvec and stopvec, are set, we are going to set up an arbpass filter
    args.arbvec = None
    if debug:
        print("before preprocessing")
        print("\targs.arbvec:", args.arbvec)
        print("\targs.passvec:", args.passvec)
        print("\targs.stopvec:", args.stopvec)
        print("\targs.filterband:", args.filterband)
    if args.stopvec is not None:
        if args.passvec is not None:
            args.arbvec = [args.passvec[0], args.passvec[1], args.stopvec[0], args.stopvec[1]]
        else:
            raise ValueError("--filterfreqs must be used if --filterstopfreqs is specified")
    else:
        if args.passvec is not None:
            args.arbvec = [
                args.passvec[0],
                args.passvec[1],
                args.passvec[0] * 0.95,
                args.passvec[1] * 1.05,
            ]
    if args.arbvec is not None:
        # NOTE - this vector is LOWERPASS, UPPERPASS, LOWERSTOP, UPPERSTOP
        # setfreqs expects LOWERSTOP, LOWERPASS, UPPERPASS, UPPERSTOP
        theprefilter = tide_filt.NoncausalFilter(
            "arb",
            transferfunc=args.filtertype,
        )
        theprefilter.setfreqs(args.arbvec[2], args.arbvec[0], args.arbvec[1], args.arbvec[3])
    else:
        theprefilter = tide_filt.NoncausalFilter(
            args.filterband,
            transferfunc=args.filtertype,
            padtime=args.padseconds,
        )

    # set the butterworth order
    theprefilter.setbutterorder(args.filtorder)

    if debug:
        print("after preprocessing")
        print("\targs.arbvec:", args.arbvec)
        print("\targs.passvec:", args.passvec)
        print("\targs.stopvec:", args.stopvec)
        print("\targs.filterband:", args.filterband)

    (
        args.lowerstop,
        args.lowerpass,
        args.upperpass,
        args.upperstop,
    ) = theprefilter.getfreqs()

    if debug:
        print("after getfreqs")
        print("\targs.arbvec:", args.arbvec)

    return args, theprefilter
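# Usage sketch for postprocessfilteropts() (a minimal example; the Namespace attribute
# values below are illustrative, not package defaults):
#
#     from argparse import Namespace
#     args = Namespace(filterband="lfo", passvec=None, stopvec=None,
#                      filtertype="trapezoidal", filtorder=6, padseconds=30.0)
#     args, theprefilter = postprocessfilteropts(args, debug=True)
#     print(args.lowerstop, args.lowerpass, args.upperpass, args.upperstop)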