def set_timepoints_auto(self,start,end=10e50,step=5500,ch_num=0,width=100): """If triggers are not good, one can try to automatically find the timepoints. For fMRI, search for first volume artifact.""" #print "Setting timepoints automatically" assert ch_num<self._data.shape[1] and ch_num>=0, "ch_num is not valid" ts = [] t = int(start) offset=0 while t<self._data.shape[0]-step and t<end: if t==int(start): #template = self._data[t-width/2:t+width/2,ch_num] searchdata = abs(hilbert(self._data[t:t+500,ch_num])) searchdata = smooth(searchdata,self._slice_width/2) bp = np.where(searchdata>200)[0][0] t = (bp+searchdata[bp:bp+self._slice_width/2].argmax())-self._slice_width/2 template = self._data[t:t+step,ch_num] ts.append(t) else: #offset = find_max_overlap(template, self._data[t-width/2:t+width/2,ch_num], width/2) offset = find_max_overlap(template, self._data[t:t+step,ch_num], width/2) #print offset ts.append(t+offset) if debug: print ts[-1], t+=step+offset self.set_timepoints(ts) return ts
def find_slices_in_mean(self,num_slices = None,ch_num=None, width=None):
    """In the calculated average data, find all positions of slice-acquisition artifacts relative to trigger. Two rounds: first, use a smoothed version (lower risk of failure), second, improve values on unsmoothed signal."""
    # Fall back to the instance configuration for unspecified parameters.
    if num_slices==None:
        num_slices=self._num_slices
    if width==None:
        width=self._slice_width
    if ch_num==None:
        ch_num=self._ch_for_search
    #First round
    # Coarse pass: maxima of the smoothed squared (power) signal.
    smooth_data = smooth(self._mean_data[:,ch_num]**2,width)
    #global test
    #test=smooth_data
    #smooth_data = (smooth_data**2).mean(axis=1)
    #if debug:
    #print smooth_data.shape, smooth_data
    #pylab.clf()
    #pylab.ioff()
    #pylab.plot(smooth_data[:])
    #pylab.show()
    #raw_input()
    maxs = find_maxs(smooth_data,num_maxs = num_slices, width=width)
    if debug:
        print "Coarse Maxs: ", maxs
    # Second round: refine each coarse position by cross-correlating against
    # a template cut around the fourth maximum.
    # NOTE(review): maxs[3] assumes at least four maxima were found.
    template = self._mean_data[maxs[3]-width/2:maxs[3]+width/2]
    for i in range(len(maxs)):
        try:
            maxs[i] += find_max_overlap(template,self._mean_data[maxs[i]-width/2:maxs[i]+width/2])
        except Exception, e:
            # Best effort: keep the coarse position if refinement fails.
            print "Error while refining max-position no. ", i, ", ", e
def find_slices_in_mean(self, num_slices=None, ch_num=None, width=None): """In the calculated average data, find all positions of slice-acquisition artifacts relative to trigger. Two rounds: first, use a smoothed version (lower risk of failure), second, improve values on unsmoothed signal.""" if num_slices == None: num_slices = self._num_slices if width == None: width = self._slice_width if ch_num == None: ch_num = self._ch_for_search #First round smooth_data = smooth(self._mean_data[:, ch_num]**2, width) #global test #test=smooth_data #smooth_data = (smooth_data**2).mean(axis=1) #if debug: #print smooth_data.shape, smooth_data #pylab.clf() #pylab.ioff() #pylab.plot(smooth_data[:]) #pylab.show() #raw_input() maxs = find_maxs(smooth_data, num_maxs=num_slices, width=width) if debug: print "Coarse Maxs: ", maxs template = self._mean_data[maxs[3] - width / 2:maxs[3] + width / 2] for i in range(len(maxs)): try: maxs[i] += find_max_overlap( template, self._mean_data[maxs[i] - width / 2:maxs[i] + width / 2]) except Exception, e: print "Error while refining max-position no. ", i, ", ", e
def test_DeltaN10W100():
    """Smoothing a length-100 delta with width 10 keeps the shape and
    produces values that decay away from the center."""
    # arrange
    delta = _make_data_with_delta(100)
    # act
    smoothed = smooth(delta, 10)
    # assert
    assert smoothed.shape == delta.shape
    assert _check_data_decay_from_center(smoothed)
def test_DeltaN10W100():
    """A delta of length 100, smoothed with width 10: the shape is
    preserved and the result decays from the center outwards."""
    raw = _make_data_with_delta(100)
    result = smooth(raw, 10)
    assert raw.shape == result.shape
    assert _check_data_decay_from_center(result)
def test_DeltaN100W10FlatWindow():
    """Flat-window smoothing of a delta spreads it into a ones(w)/w plateau."""
    num_samples = 100
    win = 10
    # arrange
    signal = _make_data_with_delta(num_samples)
    # act
    flattened = smooth(signal, win, "flat")
    # assert
    assert flattened.shape == signal.shape
    lo = num_samples / 2 - win / 2 + 1
    assert_array_almost_equal(flattened[lo:lo + win], np.ones((win)) / win)
def test_DeltaN100W10FlatWindow():
    """After flat smoothing, a delta becomes a uniform block of height 1/w."""
    n, w = 100, 10
    data = _make_data_with_delta(n)
    data_smooth = smooth(data, w, "flat")
    assert data_smooth.shape == data.shape
    start = n / 2 - w / 2 + 1
    expected = np.ones((w)) / w
    assert_array_almost_equal(data_smooth[start:start + w], expected)
def integrate(self, ts):
    """Integrate the model over the given time axis.

    First calculates the EEG time course (independently from BOLD), then
    turns each configured input band of channel 0 into a spline of its
    smoothed instantaneous power, and finally integrates the ODE system.
    Returns the odeint result over ts.
    """
    self._ts = ts
    # Sampling rate derived from the (assumed equidistant) time axis.
    self._Fs = 1. / (ts[1] - ts[0])
    # Keep every third column of the integrated RSTC signal.
    self._signal_rstc = self._RSTC.integrate(ts)[:, ::3]

    def _band_spline(lo, hi):
        # Smoothed instantaneous power (squared Hilbert envelope) of
        # channel 0 in the band [lo, hi], represented as a spline over ts.
        filtered = filtfilt_band(lo, hi, self._signal_rstc[:, 0],
                                 Fs=self._Fs, border=2)
        band_power = abs(hilbert(filtered))**2
        win = int(round(self._smooth_width * self._Fs))
        return splrep(ts, smooth(band_power, win))

    self._spls = [_band_spline(band[0], band[1]) for band in self._input_bands]
    return odeint(self.ode, self.y, self._ts)
def integrate(self, ts):
    """Integrate the model over the given time axis.

    First calculates the EEG time course (independently from BOLD); then,
    for every input band and every node, builds a spline of the smoothed
    band power; finally integrates the ODE system.

    Parameters
    ----------
    ts : array-like
        Equidistant time axis; the sampling rate is derived from
        ts[1] - ts[0].

    Returns
    -------
    ndarray
        Result of odeint(self.ode, self.y, ts).
    """
    self._ts = ts
    # Sampling rate from the (assumed equidistant) time axis.
    self._Fs = 1. / (ts[1] - ts[0])
    self._signal_rstc = self._RSTC.integrate(ts)[:, :]
    # Fix: removed a leftover debug `print self._signal_rstc.shape` that
    # polluted stdout on every call.
    # Object array holding one spline representation per (band, node).
    self._spls = np.zeros((len(self._input_bands), self._n_nodes), "O")
    for i_b, in_bd in enumerate(self._input_bands):
        for i_n in range(self._n_nodes):
            # Band-pass filter this node's signal, take its instantaneous
            # power (squared Hilbert envelope) and smooth it before fitting
            # the spline.
            tmp = filtfilt_band(in_bd[0], in_bd[1], self._signal_rstc[:, i_n],
                                Fs=self._Fs, border=2)
            power = abs(hilbert(tmp))**2
            smooth_power = smooth(power,
                                  int(round(self._smooth_width * self._Fs)))
            self._spls[i_b, i_n] = splrep(ts, smooth_power)
    rv = odeint(self.ode, self.y, self._ts)
    return rv
def set_timepoints_auto(self, start, end=10e50, step=5500, ch_num=0, width=100): """If triggers are not good, one can try to automatically find the timepoints. For fMRI, search for first volume artifact.""" #print "Setting timepoints automatically" assert ch_num < self._data.shape[ 1] and ch_num >= 0, "ch_num is not valid" ts = [] t = int(start) offset = 0 while t < self._data.shape[0] - step and t < end: if t == int(start): #template = self._data[t-width/2:t+width/2,ch_num] searchdata = abs(hilbert(self._data[t:t + 500, ch_num])) searchdata = smooth(searchdata, self._slice_width / 2) bp = np.where(searchdata > 200)[0][0] t = (bp + searchdata[bp:bp + self._slice_width / 2].argmax() ) - self._slice_width / 2 template = self._data[t:t + step, ch_num] ts.append(t) else: #offset = find_max_overlap(template, self._data[t-width/2:t+width/2,ch_num], width/2) offset = find_max_overlap(template, self._data[t:t + step, ch_num], width / 2) #print offset ts.append(t + offset) if debug: print ts[-1], t += step + offset self.set_timepoints(ts) return ts
#p.show() # time.sleep(0.2) inputs = [] states = [] refs = [] for i in range(5): #inputs.append(np.diag(np.ones((10))*np.random.random())) inputs.append(np.random.random((10, 10))) #states.append(np.zeros((10,10))) states.append(inputs[-1]) refs.append((-1) * np.ones((10, 10))) for i in range(5): inputs.append(np.random.random((10, 10))) #inputs.append(np.diag(np.ones((10))*np.random.random())) inputs[-1][:] = smooth(inputs[-1], 9) #+0.1 #states.append(np.zeros((10,10))) states.append(inputs[-1]) refs.append(np.ones((10, 10)) * (1)) ct = CNNTrainer(t_sim=40, errfunc="mean_abs_diff", opt_method="simplex") #ct = CNNTrainer(t_sim=40,errfunc="sum_sqr_diff", opt_method="powell") #ct = CNNTrainer(t_sim=40,errfunc="mean_abs_diff", opt_method="anneal") a, b, z = ct(inputs, states, refs) #p.ioff() #print "Test" p.hot() p.figure(1) for i in range(10): print i, cnn = CNN2d(inputs[i], states[i], a, b, z) try:
#p.show() # time.sleep(0.2) inputs = [] states = [] refs = [] for i in range(5): #inputs.append(np.diag(np.ones((10))*np.random.random())) inputs.append(np.random.random((10,10))) #states.append(np.zeros((10,10))) states.append(inputs[-1]) refs.append((-1)*np.ones((10,10))) for i in range(5): inputs.append(np.random.random((10,10))) #inputs.append(np.diag(np.ones((10))*np.random.random())) inputs[-1][:] = smooth(inputs[-1],9)#+0.1 #states.append(np.zeros((10,10))) states.append(inputs[-1]) refs.append(np.ones((10,10))*(1)) ct = CNNTrainer(t_sim=40,errfunc="mean_abs_diff", opt_method="simplex") #ct = CNNTrainer(t_sim=40,errfunc="sum_sqr_diff", opt_method="powell") #ct = CNNTrainer(t_sim=40,errfunc="mean_abs_diff", opt_method="anneal") a,b,z = ct(inputs,states,refs) #p.ioff() #print "Test" p.hot() p.figure(1) for i in range(10): print i, cnn = CNN2d(inputs[i],states[i],a,b,z) try: